instance_id (string, 65-198 chars) | instance_type (string, 2 classes) | task_type (string, 1 class) | repo_url (string, 21 classes) | commit (string, 40 chars) | fm_type (string, 2 classes) | fm_name (string, 1-93 chars, nullable) | pyfile_name (string, 5-61 chars) | original_code (string, 18-106k chars) | gold_code (string, 17-356k chars) | old_filtered_context (string, 24-362k chars) | old_complete_context (string, 93-363k chars) | new_filtered_context (string, 11-315k chars) | new_complete_context (string, 157-367k chars) | initial_error_log (string, 146-49.4M chars, nullable) | gold_summary (string, 723 classes) | original_summary (string, 123-141 chars) | pyfile_path (string, 18-101 chars) | unittest_path (string, 18-101 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1_fastapi_callee__utils__is_body_allowed_for_status_code__c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | callee | fail-to-pass | https://github.com/fastapi/fastapi | c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | function | is_body_allowed_for_status_code | utils.py | def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
| def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo()
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
# type: ignore[attr-defined]
new_field.class_validators = field.class_validators
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
# type: ignore[attr-defined]
new_field.validate_always = field.validate_always
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
# type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators
# type: ignore[attr-defined]
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_sql_databases/test_testing_databases.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_sql_databases/test_testing_databases.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_sql_databases/test_testing_databases.py:7: in <module>
from ...utils import needs_pydanticv1
tests/utils.py:4: in <module>
from fastapi._compat import PYDANTIC_V2
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_sql_databases/test_testing_databases.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.62s ===============================
| {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_sql_databases/test_testing_databases.py |
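
The record above targets `is_body_allowed_for_status_code`. The gold version shown in its `gold_code` cell differs from the original in two ways: it accepts the OpenAPI patterned status keys ("default", "1XX" through "5XX"), and it additionally treats 205 (Reset Content) as body-less. A minimal sketch below restates that gold behavior with a few assertions contrasting it against the original implementation; the assertion values are inferred from the two code cells above, not taken from the dataset's test logs.

```python
from typing import Union


def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
    # Gold behavior from the record above: None and OpenAPI range keys allow a body.
    if status_code is None:
        return True
    if status_code in {"default", "1XX", "2XX", "3XX", "4XX", "5XX"}:
        return True
    current_status_code = int(status_code)
    # 1xx, 204 No Content, 205 Reset Content, and 304 Not Modified never carry a body.
    return not (current_status_code < 200 or current_status_code in {204, 205, 304})


if __name__ == "__main__":
    assert is_body_allowed_for_status_code(200) is True
    assert is_body_allowed_for_status_code(204) is False
    assert is_body_allowed_for_status_code(205) is False   # the original code returned True here
    assert is_body_allowed_for_status_code("2XX") is True  # the original code raised ValueError here
    assert is_body_allowed_for_status_code(None) is True
```
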
1_fastapi_callee__utils__get_path_param_names__b9d912c6380661647b51c322820fcb4ba08f32d2 | callee | fail-to-pass | https://github.com/fastapi/fastapi | b9d912c6380661647b51c322820fcb4ba08f32d2 | function | get_path_param_names | utils.py | def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
# type: ignore[attr-defined]
new_field.class_validators = field.class_validators
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
# type: ignore[attr-defined]
new_field.validate_always = field.validate_always
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
# type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators
# type: ignore[attr-defined]
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_sql_databases/test_testing_databases.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_sql_databases/test_testing_databases.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_sql_databases/test_testing_databases.py:7: in <module>
from ...utils import needs_pydanticv1
tests/utils.py:4: in <module>
from fastapi._compat import PYDANTIC_V2
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_sql_databases/test_testing_databases.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.62s ===============================
| {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_sql_databases/test_testing_databases.py |
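
The record above targets `get_path_param_names`. Both the original set comprehension over `"{[^}]*}"` and the gold `set(re.findall("{(.*?)}", path))` extract the parameter names between braces in a route path; the gold version simply lets the capture group do the brace stripping. A small self-contained sketch of the two versions follows; the example route is illustrative only and does not come from the dataset.

```python
import re
from typing import Set


def get_path_param_names_original(path: str) -> Set[str]:
    # Original version from the record: match "{...}" and strip the braces afterwards.
    return {item.strip("{}") for item in re.findall("{[^}]*}", path)}


def get_path_param_names_gold(path: str) -> Set[str]:
    # Gold version from the record: the capture group returns only the inner name.
    return set(re.findall("{(.*?)}", path))


if __name__ == "__main__":
    path = "/users/{user_id}/items/{item_id}"  # hypothetical route, for illustration only
    assert get_path_param_names_original(path) == {"user_id", "item_id"}
    assert get_path_param_names_gold(path) == {"user_id", "item_id"}
```
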
1_fastapi_callee__utils__create_cloned_field__aa84ac8e3e305d76e71c0c1b31237517948fa12f | callee | fail-to-pass | https://github.com/fastapi/fastapi | aa84ac8e3e305d76e71c0c1b31237517948fa12f | function | create_cloned_field | utils.py | def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
# type: ignore[attr-defined]
new_field.class_validators = field.class_validators
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
# type: ignore[attr-defined]
new_field.validate_always = field.validate_always
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
# type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators
# type: ignore[attr-defined]
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
default = None,
required = False,
model_config = BaseConfig,
schema = Schema(None),
)
new_field.has_alias= field.has_alias
new_field.alias= field.alias
new_field.class_validators= field.class_validators
new_field.default= field.default
new_field.required= field.required
new_field.model_config= field.model_config
new_field.schema= field.schema
new_field.allow_none= field.allow_none
new_field.validate_always= field.validate_always
if field.sub_fields:
new_field.sub_fields= [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
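# A minimal usage sketch (hypothetical model, assumes Pydantic is installed):
# building a ModelField for a response type via the compat layer above; an
# unsupported type would instead raise FastAPIError with the hint shown.
from pydantic import BaseModel

class _Item(BaseModel):
    name: str
    price: float

_item_field = create_model_field(name="item", type_=_Item)
assert _item_field.name == "item"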
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
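# A minimal usage sketch (hypothetical field) of the function above: under
# Pydantic v2 the field is returned as-is, while under v1 an equivalent field
# is rebuilt around a cloned type.
_count_field = create_model_field(name="count", type_=int)
assert create_cloned_field(_count_field).name == "count"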
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
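# A minimal usage sketch (hypothetical route, assumes FastAPI is installed):
# the default operation id combines the route name, the path format, and the
# first HTTP method, with non-word characters replaced by underscores.
from fastapi.routing import APIRoute

async def _read_item(item_id: int):
    return {"item_id": item_id}

_route = APIRoute("/items/{item_id}", _read_item, methods=["GET"], name="read_item")
assert generate_unique_id(_route) == "read_item_items__item_id__get"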
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_sql_databases/test_testing_databases.py _
tests/test_tutorial/test_sql_databases/test_testing_databases.py:7: in <module>
from ...utils import needs_pydanticv1
tests/utils.py:4: in <module>
from fastapi._compat import PYDANTIC_V2
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
fastapi/utils.py:109: in <module>
def create_cloned_field(field: Field) -> Field:
E NameError: name 'Field' is not defined
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_sql_databases/test_testing_databases.py - Name...
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.62s ===============================
| {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_sql_databases/test_testing_databases.py |
1_fastapi_callee__utils__generate_operation_id_for_path__687065509b047a642acd1d281d6d68c6698509b1 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 687065509b047a642acd1d281d6d68c6698509b1 | function | generate_operation_id_for_path | utils.py | def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_sql_databases/test_testing_databases.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_sql_databases/test_testing_databases.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_sql_databases/test_testing_databases.py:7: in <module>
from ...utils import needs_pydanticv1
tests/utils.py:4: in <module>
from fastapi._compat import PYDANTIC_V2
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_sql_databases/test_testing_databases.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.64s ===============================
| {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_sql_databases/test_testing_databases.py |
1_fastapi_callee__utils__generate_unique_id__8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | function | generate_unique_id | utils.py | def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
| def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
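# A minimal usage sketch (hypothetical model, assumes Pydantic v1 as this
# snapshot requires): JSON Schema definitions are keyed by the names given in
# model_name_map.
from pydantic import BaseModel

class _User(BaseModel):
    id: int

_definitions = get_model_definitions(flat_models={_User}, model_name_map={_User: "User"})
assert "User" in _definitions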
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
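# A minimal usage sketch (hypothetical model, assumes Pydantic v1): building a
# response ModelField; an unsupported type raises FastAPIError as shown above.
from pydantic import BaseModel

class _Item(BaseModel):
    name: str

assert create_response_field(name="item", type_=_Item).name == "item"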
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_sql_databases/test_testing_databases.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_sql_databases/test_testing_databases.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_sql_databases/test_testing_databases.py:7: in <module>
from ...utils import needs_pydanticv1
tests/utils.py:4: in <module>
from fastapi._compat import PYDANTIC_V2
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_sql_databases/test_testing_databases.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.62s ===============================
| {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_sql_databases/test_testing_databases.py |
1_fastapi_callee__utils__deep_dict_update__181a32236a32305788b87d10e7990d9564ae2056 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 181a32236a32305788b87d10e7990d9564ae2056 | function | deep_dict_update | utils.py | def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
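As a quick trace of the helper above (illustrative only, using a made-up route name and path):

import re

# generate_operation_id_for_path(name="read_item", path="/items/{item_id}", method="GET"), step by step:
operation_id = "read_item" + "/items/{item_id}"
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + "GET".lower()
print(operation_id)  # read_item_items__item_id__get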
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
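# Illustrative usage sketch, not repository code: on either Pydantic major version
# the factory above wraps a plain annotation in a ModelField, while a type Pydantic
# cannot generate a schema for surfaces as the FastAPIError raised just above.
_example_field = create_model_field(name="item_id", type_=int)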
def create_cloned_field(
field: ModelField,
*,
    cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
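A brief illustration of the priority rule described in the docstring (illustrative; DefaultPlaceholder is the class imported at the top of this file):

from fastapi.datastructures import DefaultPlaceholder

placeholder = DefaultPlaceholder("router-level default")
# The first argument that is not a DefaultPlaceholder wins...
assert get_value_or_default(placeholder, "path-level override") == "path-level override"
# ...otherwise the first placeholder itself is returned.
assert get_value_or_default(placeholder, DefaultPlaceholder("other")) is placeholder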
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_sql_databases/test_testing_databases.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_sql_databases/test_testing_databases.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_sql_databases/test_testing_databases.py:7: in <module>
from ...utils import needs_pydanticv1
tests/utils.py:4: in <module>
from fastapi._compat import PYDANTIC_V2
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_sql_databases/test_testing_databases.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.63s ===============================
| {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_sql_databases/test_testing_databases.py |
1_fastapi_callee__utils__is_body_allowed_for_status_code__c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | callee | fail-to-pass | https://github.com/fastapi/fastapi | c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | function | is_body_allowed_for_status_code | utils.py | def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
| def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
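For reference, an illustrative check of the gold behaviour above (not one of the record's fields):

assert is_body_allowed_for_status_code(None) is True       # no declared status code
assert is_body_allowed_for_status_code("default") is True  # OpenAPI range keys pass through
assert is_body_allowed_for_status_code("2XX") is True
assert is_body_allowed_for_status_code(200) is True
assert is_body_allowed_for_status_code(204) is False       # 204, 205 and 304 never carry a body
assert is_body_allowed_for_status_code(304) is False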
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
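# Editorial aside, not repository code: this pre-change variant differs from the
# gold version recorded earlier in this row. 205 still allows a body here, and the
# OpenAPI range keys ("default", "2XX", ...) are not special-cased, so they fall
# through to int() and raise ValueError instead of returning True.
assert is_body_allowed_for_status_code(205) is True  # False after the change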
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo()
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def create_cloned_field(
field: ModelField,
*,
    cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py:7: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.65s ===============================
| {'total': 7, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 7, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py |
1_fastapi_callee__utils__get_path_param_names__b9d912c6380661647b51c322820fcb4ba08f32d2 | callee | fail-to-pass | https://github.com/fastapi/fastapi | b9d912c6380661647b51c322820fcb4ba08f32d2 | function | get_path_param_names | utils.py | def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def create_cloned_field(
field: ModelField,
*,
    cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
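# Illustrative sketch (not part of the original module):
#   is_body_allowed_for_status_code(200)    # True
#   is_body_allowed_for_status_code("2XX")  # True (OpenAPI range pattern)
#   is_body_allowed_for_status_code(204)    # False -- 204 No Content carries no body
#   is_body_allowed_for_status_code(None)   # True (no documented status code)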
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
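# Illustrative sketch (not part of the original module):
#   get_path_param_names("/users/{user_id}/items/{item_id}")  # -> {"user_id", "item_id"}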
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
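# Illustrative sketch (assumption: "Item" is a hypothetical pydantic BaseModel):
#   create_model_field(name="response_200", type_=Item) builds a ModelField under
#   both pydantic v1 and v2; an unsupported type_ is reported as FastAPIError.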
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py:7: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.63s ===============================
| {'total': 7, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 7, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py |
1_fastapi_callee__utils__create_cloned_field__aa84ac8e3e305d76e71c0c1b31237517948fa12f | callee | fail-to-pass | https://github.com/fastapi/fastapi | aa84ac8e3e305d76e71c0c1b31237517948fa12f | function | create_cloned_field | utils.py | def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
default = None,
required = False,
model_config = BaseConfig,
schema = Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py _
tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py:7: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
fastapi/utils.py:109: in <module>
def create_cloned_field(field: Field) -> Field:
E NameError: name 'Field' is not defined
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py - Na...
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.64s ===============================
| {'total': 7, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 7, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py |
1_fastapi_callee__utils__generate_operation_id_for_path__687065509b047a642acd1d281d6d68c6698509b1 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 687065509b047a642acd1d281d6d68c6698509b1 | function | generate_operation_id_for_path | utils.py | def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py:7: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.64s ===============================
| {'total': 7, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 7, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py |
1_fastapi_callee__utils__generate_unique_id__8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | function | generate_unique_id | utils.py | def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
| def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
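# Illustrative sketch (assumption: "Item" is a hypothetical pydantic model):
#   create_response_field(name="response", type_=Item) returns a ModelField;
#   a type_ that pydantic cannot handle surfaces as fastapi.exceptions.FastAPIError.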
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
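
# Illustrative sketch, not part of the original module and assuming the
# Pydantic v1 APIs this module targets: cloning the response field lets the
# framework validate (and therefore filter) returned data against the response
# model without mutating the user's original class. The model below is
# hypothetical.
class _ExampleUser(BaseModel):
    id: int
    password: str

_cloned = create_cloned_field(create_response_field(name="user_out", type_=_ExampleUser))
# _cloned.type_ is a dynamically created subclass of _ExampleUser, so response
# filtering never touches _ExampleUser itself.
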
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
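
# Illustrative sketch, not part of the original module: items are passed in
# descending priority and the first non-placeholder wins. The strings below are
# hypothetical stand-ins for route-, router- and app-level settings.
_resolved = get_value_or_default(
    DefaultPlaceholder("app-default"),  # route level: still a placeholder
    "router-override",                  # router level: explicitly set, so it wins
    DefaultPlaceholder("app-default"),  # app level
)
# _resolved == "router-override"
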
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
    # cloned_types holds already-cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
    cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
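
# Illustrative sketch, not part of the original module: building a response
# ModelField in a way that works on both Pydantic v1 and v2 through the compat
# layer above. The model below is hypothetical.
class _ExampleItem(BaseModel):
    name: str

_example_field = create_model_field(name="response_item", type_=_ExampleItem)
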
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py:7: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.64s ===============================
| {'total': 7, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 7, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py |
1_fastapi_callee__utils__deep_dict_update__181a32236a32305788b87d10e7990d9564ae2056 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 181a32236a32305788b87d10e7990d9564ae2056 | function | deep_dict_update | utils.py | def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
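
# Illustrative sketch, not part of the original function: nested dicts are
# merged recursively, lists are concatenated, and anything else is overwritten.
# The values below are hypothetical OpenAPI-style fragments.
_main = {"info": {"title": "API"}, "tags": [{"name": "items"}]}
_update = {"info": {"version": "1.0"}, "tags": [{"name": "users"}]}
deep_dict_update(_main, _update)
# _main == {"info": {"title": "API", "version": "1.0"},
#           "tags": [{"name": "items"}, {"name": "users"}]}
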
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
    # cloned_types holds already-cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
    # cloned_types holds already-cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
    cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py:7: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.63s ===============================
| {'total': 7, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 7, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_sql_databases/test_sql_databases_py310.py |
1_fastapi_callee__utils__is_body_allowed_for_status_code__c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | callee | fail-to-pass | https://github.com/fastapi/fastapi | c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | function | is_body_allowed_for_status_code | utils.py | def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
| def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
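
# Illustrative sketch, not part of the original function: a few representative
# status codes and how the rule above treats them.
_examples = {
    "default": is_body_allowed_for_status_code("default"),  # True: OpenAPI default response
    "2XX": is_body_allowed_for_status_code("2XX"),          # True: OpenAPI range pattern
    200: is_body_allowed_for_status_code(200),              # True: body allowed
    204: is_body_allowed_for_status_code(204),              # False: No Content
    304: is_body_allowed_for_status_code(304),              # False: Not Modified
    101: is_body_allowed_for_status_code(101),              # False: informational
}
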
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo()
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
    # cloned_types holds already-cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
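# Editorial examples (not in the original source) of the rule above: OpenAPI range
# patterns and "default" always allow a body; 1xx, 204, 205 and 304 never do.
assert is_body_allowed_for_status_code(None) is True
assert is_body_allowed_for_status_code("default") is True
assert is_body_allowed_for_status_code("2XX") is True
assert is_body_allowed_for_status_code(200) is True
assert is_body_allowed_for_status_code(101) is False
assert is_body_allowed_for_status_code(204) is False
assert is_body_allowed_for_status_code("304") is False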
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
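# Editorial usage sketch (not part of the original module): the helper hides the
# pydantic v1/v2 differences behind one call; Item here is a hypothetical model.
#
#     class Item(BaseModel):
#         name: str
#
#     response_field = create_model_field(name="response_item", type_=Item)
#
# When type_ is not a valid pydantic field type, the FastAPIError raised above
# points at response_model=None as the way to opt out of response-model generation.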
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
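# Editorial illustration (not in the original source): for a route named
# "read_item" with path format "/items/{item_id}" and methods {"GET"}, every
# non-alphanumeric character becomes "_", giving "read_item_items__item_id__get".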
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
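# Editorial example (not in the original source) of the merge semantics above:
# nested dicts are merged recursively, lists are concatenated, everything else
# is overwritten by the update value.
_main = {"a": {"x": 1}, "tags": ["one"], "b": 1}
_update = {"a": {"y": 2}, "tags": ["two"], "b": 3}
deep_dict_update(_main, _update)
assert _main == {"a": {"x": 1, "y": 2}, "tags": ["one", "two"], "b": 3}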
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
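# Editorial example (not in the original source): the first argument that is not
# a DefaultPlaceholder wins; if every argument is a placeholder, the first one
# is returned unchanged.
_app_default = DefaultPlaceholder("app-default")
assert get_value_or_default(_app_default, "router-override") == "router-override"
assert get_value_or_default(_app_default, DefaultPlaceholder("router-default")) is _app_default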
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py:7: in <module>
from ...utils import needs_py39, needs_pydanticv1
tests/utils.py:4: in <module>
from fastapi._compat import PYDANTIC_V2
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.62s ===============================
| {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py |
1_fastapi_callee__utils__get_path_param_names__b9d912c6380661647b51c322820fcb4ba08f32d2 | callee | fail-to-pass | https://github.com/fastapi/fastapi | b9d912c6380661647b51c322820fcb4ba08f32d2 | function | get_path_param_names | utils.py | def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py:7: in <module>
from ...utils import needs_py39, needs_pydanticv1
tests/utils.py:4: in <module>
from fastapi._compat import PYDANTIC_V2
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.62s ===============================
| {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py |
1_fastapi_callee__utils__create_cloned_field__aa84ac8e3e305d76e71c0c1b31237517948fa12f | callee | fail-to-pass | https://github.com/fastapi/fastapi | aa84ac8e3e305d76e71c0c1b31237517948fa12f | function | create_cloned_field | utils.py | def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
default = None,
required = False,
model_config = BaseConfig,
schema = Schema(None),
)
    new_field.has_alias = field.has_alias
    new_field.alias = field.alias
    new_field.class_validators = field.class_validators
    new_field.default = field.default
    new_field.required = field.required
    new_field.model_config = field.model_config
    new_field.schema = field.schema
    new_field.allow_none = field.allow_none
    new_field.validate_always = field.validate_always
    if field.sub_fields:
        new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py _
tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py:7: in <module>
from ...utils import needs_py39, needs_pydanticv1
tests/utils.py:4: in <module>
from fastapi._compat import PYDANTIC_V2
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
fastapi/utils.py:109: in <module>
def create_cloned_field(field: Field) -> Field:
E NameError: name 'Field' is not defined
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.63s ===============================
| {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py |
1_fastapi_callee__utils__generate_operation_id_for_path__687065509b047a642acd1d281d6d68c6698509b1 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 687065509b047a642acd1d281d6d68c6698509b1 | function | generate_operation_id_for_path | utils.py | def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
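# Editor's note (hedged, illustrative only): in this era of the codebase the
# OpenAPI generator first collects flat_models via get_flat_models_from_routes(),
# builds the model_name_map with pydantic's get_model_name_map(), and then calls
# get_model_definitions() so every schema lands under REF_PREFIX
# ("#/components/schemas/") in the generated document.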
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
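# Editor's illustration (not part of the original utils.py): status codes below
# 200 and the bodiless 204/205/304 reject a response body, while None and the
# OpenAPI range patterns handled above always allow one.
assert is_body_allowed_for_status_code(200) is True
assert is_body_allowed_for_status_code("default") is True
assert is_body_allowed_for_status_code("2XX") is True
assert is_body_allowed_for_status_code(204) is False
assert is_body_allowed_for_status_code("304") is False
assert is_body_allowed_for_status_code(101) is False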
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
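# Editor's illustration (added, not original): the non-greedy "{(.*?)}" pattern
# extracts each templated segment name from a route path.
assert get_path_param_names("/users/{user_id}/items/{item_id}") == {"user_id", "item_id"}
assert get_path_param_names("/health") == set()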
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
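# Editor's sketch (hedged; the field name and type below are illustrative, not
# taken from the original code): a typical internal call only needs a name and
# a type, and the helper hides the Pydantic v1/v2 constructor differences.
#
#     field = create_model_field(name="response_model", type_=int)
#
# Anything Pydantic cannot turn into a field surfaces as the FastAPIError
# raised above instead of a bare RuntimeError/PydanticSchemaGenerationError.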
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
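# Editor's note (hedged): under Pydantic v1 the clone gives FastAPI an
# independent copy of the declared response_model type to validate and filter
# returned data against, without mutating the user's own model class; under
# Pydantic v2 the early return above skips cloning because it is not needed.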
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
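# Editor's example (not part of the original module): the first argument that
# is not a DefaultPlaceholder wins; otherwise the first placeholder is
# returned unchanged, preserving its wrapped default value.
assert get_value_or_default(DefaultPlaceholder("json"), "orjson") == "orjson"
assert isinstance(
    get_value_or_default(DefaultPlaceholder("json"), DefaultPlaceholder("html")),
    DefaultPlaceholder,
)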
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
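# Editor's example (added for illustration; the _doc variable is not in the
# original file): nested dicts merge recursively, matching lists concatenate,
# and any other value is overwritten.
_doc = {"info": {"title": "API"}, "tags": [{"name": "a"}]}
deep_dict_update(_doc, {"info": {"version": "1.0"}, "tags": [{"name": "b"}]})
assert _doc == {
    "info": {"title": "API", "version": "1.0"},
    "tags": [{"name": "a"}, {"name": "b"}],
}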
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py:7: in <module>
from ...utils import needs_py39, needs_pydanticv1
tests/utils.py:4: in <module>
from fastapi._compat import PYDANTIC_V2
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.64s ===============================
| {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py |
1_fastapi_callee__utils__generate_unique_id__8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | function | generate_unique_id | utils.py | def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
| def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
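# Editor's illustration (hedged): for a route registered as
# @app.get("/items/{item_id}") with endpoint function read_item, route.name is
# "read_item" and route.path_format is "/items/{item_id}", so the generated id
# is "read_item_items__item_id__get" -- the same sanitisation as
# generate_operation_id_for_path, but driven by the APIRoute object so it can
# serve as the default operationId.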
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py:7: in <module>
from ...utils import needs_py39, needs_pydanticv1
tests/utils.py:4: in <module>
from fastapi._compat import PYDANTIC_V2
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.63s ===============================
| {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py |
1_fastapi_callee__utils__deep_dict_update__181a32236a32305788b87d10e7990d9564ae2056 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 181a32236a32305788b87d10e7990d9564ae2056 | function | deep_dict_update | utils.py | def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
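# Illustrative sketch, not part of the original module: assuming the
# is_body_allowed_for_status_code defined just above, a body is allowed for
# ordinary success/error codes and for OpenAPI range patterns, but not for
# informational responses or for 204, 205, and 304.
assert is_body_allowed_for_status_code(200) is True
assert is_body_allowed_for_status_code("4XX") is True
assert is_body_allowed_for_status_code(204) is False
assert is_body_allowed_for_status_code(304) is False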
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
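# Minimal usage sketch (illustrative only; assumes the create_model_field
# defined above together with the fastapi._compat imports at the top of this
# module): build a simple int field and check its name.
_example_field = create_model_field(name="item_id", type_=int)
assert _example_field.name == "item_id"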
def create_cloned_field(
field: ModelField,
*,
    cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
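# Worked example (illustrative; uses literal strings instead of a real
# APIRoute object): the rewrite applied by generate_unique_id turns the
# hypothetical route name "read_items", path "/items/{item_id}" and method
# GET into the id below.
_example_unique_id = re.sub(r"\W", "_", "read_items" + "/items/{item_id}") + "_get"
assert _example_unique_id == "read_items_items__item_id__get"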
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
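# Short illustration (not part of the original file), assuming the
# get_path_param_names defined above: the returned names are simply the
# contents of the curly-brace placeholders.
assert get_path_param_names("/users/{user_id}/items/{item_id}") == {"user_id", "item_id"}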
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
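# Minimal sketch (illustrative only, assuming the deep_dict_update above):
# nested dicts are merged recursively, lists are concatenated, and plain
# values are overwritten.
_main = {"info": {"title": "API"}, "tags": ["a"], "version": 1}
_update = {"info": {"version": "1.0"}, "tags": ["b"], "version": 2}
deep_dict_update(_main, _update)
assert _main == {
    "info": {"title": "API", "version": "1.0"},
    "tags": ["a", "b"],
    "version": 2,
}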
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
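# Usage sketch (illustrative; assumes the get_value_or_default above and the
# DefaultPlaceholder imported from fastapi.datastructures in this module):
# the first non-placeholder argument wins, otherwise the first placeholder
# is returned unchanged.
assert get_value_or_default(DefaultPlaceholder(1), 2) == 2
assert isinstance(
    get_value_or_default(DefaultPlaceholder(1), DefaultPlaceholder(2)),
    DefaultPlaceholder,
)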
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py:7: in <module>
from ...utils import needs_py39, needs_pydanticv1
tests/utils.py:4: in <module>
from fastapi._compat import PYDANTIC_V2
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.65s ===============================
| {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_sql_databases/test_testing_databases_py39.py |
1_fastapi_callee__utils__is_body_allowed_for_status_code__c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | callee | fail-to-pass | https://github.com/fastapi/fastapi | c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | function | is_body_allowed_for_status_code | utils.py | def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
| def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo()
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def create_cloned_field(
field: ModelField,
*,
    cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py:7: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.63s ===============================
| {'total': 7, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 7, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py |
1_fastapi_callee__utils__get_path_param_names__b9d912c6380661647b51c322820fcb4ba08f32d2 | callee | fail-to-pass | https://github.com/fastapi/fastapi | b9d912c6380661647b51c322820fcb4ba08f32d2 | function | get_path_param_names | utils.py | def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
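# Side-by-side check (illustrative, not part of either dataset field): for a
# typical path, the original strip("{}") implementation and the regex-group
# gold version extract the same parameter names.
import re
_path = "/users/{user_id}/items/{item_id}"
assert {item.strip("{}") for item in re.findall("{[^}]*}", _path)} == {"user_id", "item_id"}
assert set(re.findall("{(.*?)}", _path)) == {"user_id", "item_id"}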
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def create_cloned_field(
field: ModelField,
*,
    cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py:7: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.63s ===============================
| {'total': 7, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 7, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py |
1_fastapi_callee__utils__create_cloned_field__aa84ac8e3e305d76e71c0c1b31237517948fa12f | callee | fail-to-pass | https://github.com/fastapi/fastapi | aa84ac8e3e305d76e71c0c1b31237517948fa12f | function | create_cloned_field | utils.py | def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
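# Illustrative sketch (hedged) of the cloned_types cache documented above, meaningful
# under Pydantic v1 only (with Pydantic v2 installed the function returns the field
# unchanged). Two fields sharing the same model type reuse one generated clone.
# Model names Inner/Outer are made up; PYDANTIC_V2 comes from fastapi._compat.
from pydantic import BaseModel

class Inner(BaseModel):
    value: int

class Outer(BaseModel):
    a: Inner
    b: Inner

if not PYDANTIC_V2:
    cache: dict = {}
    clone_a = create_cloned_field(Outer.__fields__["a"], cloned_types=cache)
    clone_b = create_cloned_field(Outer.__fields__["b"], cloned_types=cache)
    assert clone_a.type_ is clone_b.type_  # Inner was cloned once and reused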
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
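# Illustrative sketch (hedged): why the cache above is a WeakKeyDictionary rather
# than a plain dict. Once nothing else references a key class, its entry is dropped
# automatically, so the cache never keeps model classes alive on its own. The class
# below is a plain stand-in, not a real Pydantic model.
import gc

_demo_cache = WeakKeyDictionary()

class _Original:
    pass

_demo_cache[_Original] = "clone-placeholder"
assert len(_demo_cache) == 1
del _Original
gc.collect()  # classes sit in reference cycles, so a gc pass may be needed
assert len(_demo_cache) == 0  # the entry vanished together with the class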
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
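# Illustrative spot-checks (hedged) for the function above: informational 1xx codes
# and the bodiless 204 / 205 / 304 codes reject a body, while OpenAPI range keys
# such as "4XX" and "default" are treated as body-allowed.
assert is_body_allowed_for_status_code(200) is True
assert is_body_allowed_for_status_code("default") is True
assert is_body_allowed_for_status_code("4XX") is True
assert is_body_allowed_for_status_code(101) is False
assert is_body_allowed_for_status_code(204) is False
assert is_body_allowed_for_status_code(304) is False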
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
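# Illustrative sketch (hedged) for the helper above; the paths are made-up examples.
assert get_path_param_names("/users/{user_id}/items/{item_id}") == {"user_id", "item_id"}
assert get_path_param_names("/health") == set()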
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
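# Illustrative sketch (hedged): building a ModelField for a response type with the
# helper above, plus the error branch for a type Pydantic cannot build a schema for.
# The model and field names here are made up.
from pydantic import BaseModel

class _Item(BaseModel):
    name: str

_item_field = create_model_field(name="response_item", type_=_Item)
assert _item_field.name == "response_item"

try:
    create_model_field(name="bad", type_=lambda value: value)
except Exception as exc:
    # Expected to surface as fastapi.exceptions.FastAPIError with the hint above.
    assert type(exc).__name__ == "FastAPIError"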
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py _
tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py:7: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
fastapi/utils.py:109: in <module>
def create_cloned_field(field: Field) -> Field:
E NameError: name 'Field' is not defined
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py - Nam...
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.63s ===============================
| {'total': 7, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 7, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py |
1_fastapi_callee__utils__generate_operation_id_for_path__687065509b047a642acd1d281d6d68c6698509b1 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 687065509b047a642acd1d281d6d68c6698509b1 | function | generate_operation_id_for_path | utils.py | def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
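# Illustrative sketch (hedged) of the deprecated helper's output: non-word characters
# in name + path collapse to underscores and the lowercased HTTP method is appended.
# The route name and path are made-up examples.
import warnings as _warnings

with _warnings.catch_warnings():
    _warnings.simplefilter("ignore", DeprecationWarning)
    _op_id = generate_operation_id_for_path(
        name="read_item", path="/items/{item_id}", method="GET"
    )
assert _op_id == "read_item_items__item_id__get"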
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py:7: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.63s ===============================
| {'total': 7, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 7, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py |
1_fastapi_callee__utils__generate_unique_id__8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | function | generate_unique_id | utils.py | def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
| def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
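# Illustrative sketch (hedged): what the function above produces for a simple route.
# The app and endpoint below are made up; the id combines route name, path and HTTP
# method with non-word characters replaced by underscores.
from fastapi import FastAPI
from fastapi.routing import APIRoute

_app = FastAPI()

@_app.get("/items/{item_id}")
def read_item(item_id: int):
    return {"item_id": item_id}

_route = next(
    r for r in _app.routes if isinstance(r, APIRoute) and r.path == "/items/{item_id}"
)
assert generate_unique_id(_route) == "read_item_items__item_id__get"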
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
    cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
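# Illustrative sketch (plain weakref behavior, not FastAPI-specific code): a
# WeakKeyDictionary drops its entry once the key class is no longer referenced elsewhere,
# so this module-level cache cannot keep short-lived, dynamically created models alive.
import gc

_demo_cache: "WeakKeyDictionary[type, str]" = WeakKeyDictionary()

class _TemporaryModel:  # hypothetical stand-in for a dynamically created model
    pass

_demo_cache[_TemporaryModel] = "cloned"
assert len(_demo_cache) == 1
del _TemporaryModel
gc.collect()  # on CPython the cache entry is dropped here; len(_demo_cache) == 0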
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
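# Illustrative usage sketch (made-up path): the non-greedy pattern extracts every templated
# segment name from the route path.
assert get_path_param_names("/users/{user_id}/items/{item_id}") == {"user_id", "item_id"}
assert get_path_param_names("/health") == set()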
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
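# Illustrative sketch (hypothetical endpoint, kept as a comment because it belongs in an
# application module rather than here): the error above suggests opting out of
# response-model generation when the return annotation is not a valid Pydantic field, e.g.
#
#   @app.get("/items/{item_id}", response_model=None)
#   async def read_item(item_id: int) -> Union[Response, dict, None]:
#       ...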
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
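# Illustrative usage sketch (hypothetical route stub exposing only the attributes read
# above): non-word characters in the name and path are replaced by "_" and the first HTTP
# method is appended.
class _RouteStub:
    name = "read_item"
    path_format = "/items/{item_id}"
    methods = {"GET"}

assert generate_unique_id(_RouteStub()) == "read_item_items__item_id__get"  # type: ignore[arg-type]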
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py:7: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.63s ===============================
| {'total': 7, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 7, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py |
1_fastapi_callee__utils__deep_dict_update__181a32236a32305788b87d10e7990d9564ae2056 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 181a32236a32305788b87d10e7990d9564ae2056 | function | deep_dict_update | utils.py | def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
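# Illustrative usage sketch (made-up OpenAPI-like fragments): nested dicts merge
# recursively, lists present on both sides are concatenated, and any other value is
# overwritten in place.
main = {"info": {"title": "API"}, "tags": [{"name": "items"}], "openapi": "3.0.2"}
patch = {"info": {"version": "2.0"}, "tags": [{"name": "users"}], "openapi": "3.1.0"}
deep_dict_update(main, patch)
assert main == {
    "info": {"title": "API", "version": "2.0"},
    "tags": [{"name": "items"}, {"name": "users"}],
    "openapi": "3.1.0",
}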
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
    # cloned_types caches already-cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
    # cloned_types caches already-cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
    cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py:7: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.64s ===============================
| {'total': 7, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 7, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_sql_databases/test_sql_databases_py39.py |
1_fastapi_callee__tutorial003__get_authors__684c0077cc0ef48364cd3241b80a135213376d22 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 684c0077cc0ef48364cd3241b80a135213376d22 | function | get_authors | tutorial003.py | def get_authors(): # (8)
return [ # (9)
{
"name": "Breaters",
"items": [
{
"name": "Island In The Moon",
"description": "A place to be be playin' and havin' fun",
},
{"name": "Holy Buddies"},
],
},
{
"name": "System of an Up",
"items": [
{
"name": "Salt",
"description": "The kombucha mushroom people's favorite",
},
{"name": "Pad Thai"},
{
"name": "Lonely Night",
"description": "The mostests lonliest nightiest of allest",
},
],
},
]
| def get_authors(): # (8)
return [ # (9)
{
"name": "Breaters",
"items": [
{
"name": "Island In The Moon",
"description": "A place to be playin' and havin' fun",
},
{"name": "Holy Buddies"},
],
},
{
"name": "System of an Up",
"items": [
{
"name": "Salt",
"description": "The kombucha mushroom people's favorite",
},
{"name": "Pad Thai"},
{
"name": "Lonely Night",
"description": "The mostests lonliest nightiest of allest",
},
],
},
]
| from dataclasses import field # (1)
from typing import List, Optional
from fastapi import FastAPI
from pydantic.dataclasses import dataclass # (2)
@dataclass
class Item:
name: str
description: Optional[str] = None
@dataclass
class Author:
name: str
items: List[Item] = field(default_factory=list) # (3)
app = FastAPI()
@app.post("/authors/{author_id}/items/", response_model=Author) # (4)
async def create_author_items(author_id: str, items: List[Item]): # (5)
return {"name": author_id, "items": items} # (6)
@app.get("/authors/", response_model=List[Author]) # (7)
| from dataclasses import field # (1)
from typing import List, Optional
from fastapi import FastAPI
from pydantic.dataclasses import dataclass # (2)
@dataclass
class Item:
name: str
description: Optional[str] = None
@dataclass
class Author:
name: str
items: List[Item] = field(default_factory=list) # (3)
app = FastAPI()
@app.post("/authors/{author_id}/items/", response_model=Author) # (4)
async def create_author_items(author_id: str, items: List[Item]): # (5)
return {"name": author_id, "items": items} # (6)
@app.get("/authors/", response_model=List[Author]) # (7)
def get_authors(): # (8)
return [ # (9)
{
"name": "Breaters",
"items": [
{
"name": "Island In The Moon",
"description": "A place to be be playin' and havin' fun",
},
{"name": "Holy Buddies"},
],
},
{
"name": "System of an Up",
"items": [
{
"name": "Salt",
"description": "The kombucha mushroom people's favorite",
},
{"name": "Pad Thai"},
{
"name": "Lonely Night",
"description": "The mostests lonliest nightiest of allest",
},
],
},
]
| from dataclasses import field # (1)
from typing import List, Union
from fastapi import FastAPI
from pydantic.dataclasses import dataclass # (2)
@dataclass
class Item:
name: str
description: Union[str, None] = None
@dataclass
class Author:
name: str
items: List[Item] = field(default_factory=list) # (3)
app = FastAPI()
@app.post("/authors/{author_id}/items/", response_model=Author) # (4)
async def create_author_items(author_id: str, items: List[Item]): # (5)
return {"name": author_id, "items": items} # (6)
@app.get("/authors/", response_model=List[Author]) # (7)
| from dataclasses import field # (1)
from typing import List, Union
from fastapi import FastAPI
from pydantic.dataclasses import dataclass # (2)
@dataclass
class Item:
name: str
description: Union[str, None] = None
@dataclass
class Author:
name: str
items: List[Item] = field(default_factory=list) # (3)
app = FastAPI()
@app.post("/authors/{author_id}/items/", response_model=Author) # (4)
async def create_author_items(author_id: str, items: List[Item]): # (5)
return {"name": author_id, "items": items} # (6)
@app.get("/authors/", response_model=List[Author]) # (7)
def get_authors(): # (8)
return [ # (9)
{
"name": "Breaters",
"items": [
{
"name": "Island In The Moon",
"description": "A place to be playin' and havin' fun",
},
{"name": "Holy Buddies"},
],
},
{
"name": "System of an Up",
"items": [
{
"name": "Salt",
"description": "The kombucha mushroom people's favorite",
},
{"name": "Pad Thai"},
{
"name": "Lonely Night",
"description": "The mostests lonliest nightiest of allest",
},
],
},
]
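# Illustrative usage sketch (not part of the tutorial): the Pydantic dataclasses keep plain
# dataclass defaults, so "items" falls back to an empty list and "description" to None.
author = Author(name="Breaters")
assert author.items == []
assert Item(name="Salt").description is None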
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 4 items
tests/test_tutorial/test_dataclasses/test_tutorial003.py::test_post_authors_item PASSED [ 25%]
tests/test_tutorial/test_dataclasses/test_tutorial003.py::test_get_authors FAILED [ 50%]
tests/test_tutorial/test_dataclasses/test_tutorial003.py::test_openapi_schema PASSED [ 75%]
tests/test_tutorial/test_dataclasses/test_tutorial003.py::test_openapi_schema_pv1 SKIPPED [100%]
=================================== FAILURES ===================================
_______________________________ test_get_authors _______________________________
def test_get_authors():
response = client.get("/authors/")
assert response.status_code == 200
> assert response.json() == [
{
"name": "Breaters",
"items": [
{
"name": "Island In The Moon",
"description": "A place to be playin' and havin' fun",
},
{"name": "Holy Buddies", "description": None},
],
},
{
"name": "System of an Up",
"items": [
{
"name": "Salt",
"description": "The kombucha mushroom people's favorite",
},
{"name": "Pad Thai", "description": None},
{
"name": "Lonely Night",
"description": "The mostests lonliest nightiest of allest",
},
],
},
]
E assert [{'items': [{...em of an Up'}] == [{'items': [{...em of an Up'}]
E
E At index 0 diff: {'name': 'Breaters', 'items': [{'name': 'Island In The Moon', 'description': "A place to be be playin' and havin' fun"}, {'name': 'Holy Buddies', 'description': None}]} != {'name': 'Breaters', 'items': [{'name': 'Island In The Moon', 'description': "A place to be playin' and havin' fun"}, {'name': 'Holy Buddies', 'description': None}]}
E
E Full diff:
E [
E {
E 'items': [...
E
E ...Full output truncated (31 lines hidden), use '-vv' to show
tests/test_tutorial/test_dataclasses/test_tutorial003.py:28: AssertionError
=========================== short test summary info ============================
FAILED tests/test_tutorial/test_dataclasses/test_tutorial003.py::test_get_authors
==================== 1 failed, 2 passed, 1 skipped in 0.68s ====================
| {'total': 4, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 4, 'passed': 2, 'xpassed': 0, 'failed': 1, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | ./test_repo/docs_src/dataclasses/tutorial003.py | ./test_repo/tests/test_tutorial/test_dataclasses/test_tutorial003.py |
1_fastapi_callee__utils__is_body_allowed_for_status_code__c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | callee | fail-to-pass | https://github.com/fastapi/fastapi | c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | function | is_body_allowed_for_status_code | utils.py | def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
| def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo()
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
    # cloned_types caches already-cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
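# A minimal usage sketch (illustrative status codes only): the OpenAPI range
# patterns and "default" are treated as body-capable, while informational and
# empty-body codes are not.
assert is_body_allowed_for_status_code(None) is True
assert is_body_allowed_for_status_code("default") is True
assert is_body_allowed_for_status_code("2XX") is True
assert is_body_allowed_for_status_code(200) is True
assert is_body_allowed_for_status_code(101) is False
assert is_body_allowed_for_status_code(204) is False
assert is_body_allowed_for_status_code("304") is False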
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
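# Illustrative sketch with a hypothetical stand-in route (a real APIRoute needs a
# running application, so SimpleNamespace only mimics the attributes used above).
from types import SimpleNamespace
_example_route = SimpleNamespace(
    name="read_item", path_format="/items/{item_id}", methods={"GET"}
)
assert generate_unique_id(_example_route) == "read_item_items__item_id__get"  # type: ignore[arg-type]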
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
__ ERROR collecting tests/test_tutorial/test_dataclasses/test_tutorial003.py ___
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_dataclasses/test_tutorial003.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_dataclasses/test_tutorial003.py:1: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_dataclasses/test_tutorial003.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.61s ===============================
| {'total': 4, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_dataclasses/test_tutorial003.py |
1_fastapi_callee__utils__get_path_param_names__b9d912c6380661647b51c322820fcb4ba08f32d2 | callee | fail-to-pass | https://github.com/fastapi/fastapi | b9d912c6380661647b51c322820fcb4ba08f32d2 | function | get_path_param_names | utils.py | def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
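# Brief sketch of the merge semantics above (hypothetical dicts): nested dicts are
# merged recursively, lists are concatenated, and everything else is overwritten.
_main = {"info": {"title": "API"}, "tags": ["items"], "version": 1}
deep_dict_update(_main, {"info": {"version": "2.0"}, "tags": ["users"], "version": 2})
assert _main == {
    "info": {"title": "API", "version": "2.0"},
    "tags": ["items", "users"],
    "version": 2,
}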
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
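# Quick illustrative check (made-up path): only the parameter names inside the
# braces are returned, as an unordered set.
assert get_path_param_names("/users/{user_id}/items/{item_id}") == {"user_id", "item_id"}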
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
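# Minimal sketch (hypothetical field name and type): build a version-agnostic
# ModelField for a plain dict payload; on Pydantic v2 this goes through a FieldInfo
# carrying the annotation, on v1 through the classic ModelField keyword arguments.
_payload_field = create_model_field(name="payload", type_=dict)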
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
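# Minimal sketch (hypothetical values): an explicit value wins over any
# DefaultPlaceholder, otherwise the first placeholder is returned unchanged.
_router_default = DefaultPlaceholder("application/json")
assert get_value_or_default(_router_default, "text/html") == "text/html"
assert get_value_or_default(_router_default, DefaultPlaceholder("text/html")) is _router_default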
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
__ ERROR collecting tests/test_tutorial/test_dataclasses/test_tutorial003.py ___
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_dataclasses/test_tutorial003.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_dataclasses/test_tutorial003.py:1: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_dataclasses/test_tutorial003.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.61s ===============================
| {'total': 4, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_dataclasses/test_tutorial003.py |
1_fastapi_callee__utils__create_cloned_field__aa84ac8e3e305d76e71c0c1b31237517948fa12f | callee | fail-to-pass | https://github.com/fastapi/fastapi | aa84ac8e3e305d76e71c0c1b31237517948fa12f | function | create_cloned_field | utils.py | def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
        default=None,
        required=False,
        model_config=BaseConfig,
        schema=Schema(None),
    )
    new_field.has_alias = field.has_alias
    new_field.alias = field.alias
    new_field.class_validators = field.class_validators
    new_field.default = field.default
    new_field.required = field.required
    new_field.model_config = field.model_config
    new_field.schema = field.schema
    new_field.allow_none = field.allow_none
    new_field.validate_always = field.validate_always
    if field.sub_fields:
        new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
__ ERROR collecting tests/test_tutorial/test_dataclasses/test_tutorial003.py ___
tests/test_tutorial/test_dataclasses/test_tutorial003.py:1: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
fastapi/utils.py:109: in <module>
def create_cloned_field(field: Field) -> Field:
E NameError: name 'Field' is not defined
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_dataclasses/test_tutorial003.py - NameError: n...
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.62s ===============================
| {'total': 4, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_dataclasses/test_tutorial003.py |
1_fastapi_callee__utils__generate_operation_id_for_path__687065509b047a642acd1d281d6d68c6698509b1 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 687065509b047a642acd1d281d6d68c6698509b1 | function | generate_operation_id_for_path | utils.py | def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
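# Illustrative call (hypothetical arguments) showing the id format produced by the
# deprecated helper above; `re` and `warnings` are imported here so the sketch is
# self-contained, and the DeprecationWarning the helper emits is silenced.
import re
import warnings
with warnings.catch_warnings():
    warnings.simplefilter("ignore", DeprecationWarning)
    _op_id = generate_operation_id_for_path(
        name="read_item", path="/items/{item_id}", method="GET"
    )
assert _op_id == "read_item_items__item_id__get"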
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
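# Illustrative check (not repository code): the helper above accepts OpenAPI range
# patterns such as "default" and "2XX", while informational and body-less codes
# (anything below 200, plus 204, 205 and 304) are rejected.
assert is_body_allowed_for_status_code(None) is True
assert is_body_allowed_for_status_code("default") is True
assert is_body_allowed_for_status_code("2XX") is True
assert is_body_allowed_for_status_code(200) is True
assert is_body_allowed_for_status_code("307") is True
assert is_body_allowed_for_status_code(101) is False
assert is_body_allowed_for_status_code(204) is False
assert is_body_allowed_for_status_code("304") is False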
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
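# Usage sketch only (assumes Pydantic is importable here, as in the imports above):
# create_model_field hides the Pydantic v1/v2 differences behind one call, and an
# unsupported type_ surfaces as fastapi.exceptions.FastAPIError rather than a raw
# Pydantic error.
from pydantic import BaseModel

class _ExampleItem(BaseModel):  # hypothetical model, for illustration only
    name: str

_example_field = create_model_field(name="item", type_=_ExampleItem)
# A call like create_model_field(name="bad", type_=object()) is the kind of input
# that ends in FastAPIError with the hint message shown above.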
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
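# Aside (illustrative, not repository code): the _CLONED_TYPES_CACHE above is a
# WeakKeyDictionary, so cached clones disappear automatically once the original
# model class is no longer referenced anywhere else.
import weakref

class _OriginalModel:  # hypothetical stand-in for a model class used as a cache key
    pass

_demo_cache: "weakref.WeakKeyDictionary[type, str]" = weakref.WeakKeyDictionary()
_demo_cache[_OriginalModel] = "cloned-version"
assert _demo_cache[_OriginalModel] == "cloned-version"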
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
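A brief usage sketch (assumed, not taken from the dataset) for the get_value_or_default helper defined at the end of the context above: the first argument that is not a DefaultPlaceholder wins; otherwise the first placeholder is returned unchanged.

from fastapi.datastructures import Default, DefaultPlaceholder

placeholder = Default("application/json")  # wrapped default
override = "text/html"                     # explicit, non-placeholder value

assert get_value_or_default(placeholder, override) == "text/html"
assert isinstance(get_value_or_default(placeholder, Default(None)), DefaultPlaceholder)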
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
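# Quick illustration (not repository code): every "{...}" segment in a route path
# becomes one entry in the returned set of parameter names.
assert get_path_param_names("/items/{item_id}/owners/{owner_id}") == {"item_id", "owner_id"}
assert get_path_param_names("/health") == set()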
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
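# Sketch only: generate_unique_id expects an APIRoute, but it only reads .name,
# .path_format and .methods, so a duck-typed stand-in (hypothetical, for
# illustration) is enough to show the resulting operation id.
from types import SimpleNamespace

_fake_route = SimpleNamespace(name="read_item", path_format="/items/{item_id}", methods={"GET"})
assert generate_unique_id(_fake_route) == "read_item_items__item_id__get"  # type: ignore[arg-type]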
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
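# Illustration (not repository code): nested dicts merge recursively, lists with the
# same key are concatenated, and any other value is simply overwritten.
_base = {"info": {"title": "API", "version": "1"}, "tags": ["a"], "debug": False}
deep_dict_update(_base, {"info": {"version": "2"}, "tags": ["b"], "debug": True})
assert _base == {"info": {"title": "API", "version": "2"}, "tags": ["a", "b"], "debug": True}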
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
__ ERROR collecting tests/test_tutorial/test_dataclasses/test_tutorial003.py ___
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_dataclasses/test_tutorial003.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_dataclasses/test_tutorial003.py:1: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_dataclasses/test_tutorial003.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.61s ===============================
| {'total': 4, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_dataclasses/test_tutorial003.py |
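The collection error recorded above is the classic symptom of a renamed public helper: the checked-out fastapi.utils only defines create_model_field, while fastapi.dependencies.utils still imports create_response_field. A minimal way to observe the same failure directly (based on that log, outside pytest):

try:
    from fastapi.utils import create_response_field  # old name, no longer defined
except ImportError as exc:
    print(exc)  # cannot import name 'create_response_field' from 'fastapi.utils'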
1_fastapi_callee__utils__generate_unique_id__8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | function | generate_unique_id | utils.py | def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
| def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
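# Side note (illustrative, not repository code): the functools.partial call above
# pre-binds every ModelField argument that is identical across Pydantic releases,
# leaving only the version-specific keyword (field_info vs schema) for the final call.
import functools

def _build(name, type_, field_info=None, schema=None):  # hypothetical constructor
    return {"name": name, "type_": type_, "field_info": field_info, "schema": schema}

_partial_build = functools.partial(_build, name="item", type_=int)
assert _partial_build(field_info="fi")["field_info"] == "fi"
assert _partial_build(schema="sc")["schema"] == "sc"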
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
__ ERROR collecting tests/test_tutorial/test_dataclasses/test_tutorial003.py ___
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_dataclasses/test_tutorial003.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_dataclasses/test_tutorial003.py:1: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_dataclasses/test_tutorial003.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.61s ===============================
| {'total': 4, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_dataclasses/test_tutorial003.py |
1_fastapi_callee__utils__deep_dict_update__181a32236a32305788b87d10e7990d9564ae2056 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 181a32236a32305788b87d10e7990d9564ae2056 | function | deep_dict_update | utils.py | def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
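# Editor's illustrative sketch (not part of the original module): expected
# results for a few representative inputs, including the OpenAPI patterned
# status-code keys handled above.
assert is_body_allowed_for_status_code(None) is True
assert is_body_allowed_for_status_code("default") is True
assert is_body_allowed_for_status_code("2XX") is True
assert is_body_allowed_for_status_code(200) is True
assert is_body_allowed_for_status_code(204) is False
assert is_body_allowed_for_status_code(304) is False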
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
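# Editor's illustrative sketch (not part of the original module); the media
# types are invented. The first non-placeholder argument wins, otherwise the
# first placeholder is returned unchanged.
_default_media_type = DefaultPlaceholder("application/json")
assert get_value_or_default(_default_media_type, "text/html") == "text/html"
assert (
    get_value_or_default(_default_media_type, DefaultPlaceholder("text/html"))
    is _default_media_type
)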
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
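# Editor's illustrative sketch (not part of the original module): the helper is
# normally called with just a name and a type; "price" is an invented example.
def _example_price_field() -> ModelField:
    return create_model_field(name="price", type_=float)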
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
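# Editor's illustrative sketch (not part of the original module): for a
# hypothetical route named "read_item" with path format "/items/{item_id}" and
# method GET, the transformation above collapses every non-word character to
# an underscore before appending the lowercased method.
assert (
    re.sub(r"\W", "_", "read_item/items/{item_id}") + "_get"
    == "read_item_items__item_id__get"
)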
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
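# Editor's illustrative sketch (not part of the original module); the dicts are
# invented. Unlike the older variant, lists present in both dicts are
# concatenated instead of replaced.
_openapi_extra = {"tags": [{"name": "items"}]}
deep_dict_update(_openapi_extra, {"tags": [{"name": "users"}]})
assert _openapi_extra == {"tags": [{"name": "items"}, {"name": "users"}]}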
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
__ ERROR collecting tests/test_tutorial/test_dataclasses/test_tutorial003.py ___
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_dataclasses/test_tutorial003.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_dataclasses/test_tutorial003.py:1: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_dataclasses/test_tutorial003.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.62s ===============================
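A possible repair, offered here only as an editor's sketch under the assumption that the rename from create_response_field to create_model_field caused the stale import above: keeping a backwards-compatible alias in fastapi/utils.py would let fastapi/dependencies/utils.py import the old name unchanged.

    # hypothetical backwards-compatible alias in fastapi/utils.py
    create_response_field = create_model_field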
| {'total': 4, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_dataclasses/test_tutorial003.py |
1_fastapi_callee__utils__is_body_allowed_for_status_code__c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | callee | fail-to-pass | https://github.com/fastapi/fastapi | c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | function | is_body_allowed_for_status_code | utils.py | def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
| def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo()
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
    # cloned_types holds already-cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial006_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial006_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial006_py310.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial006_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.62s ===============================
| {'total': 3, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial006_py310.py |
1_fastapi_callee__utils__get_path_param_names__b9d912c6380661647b51c322820fcb4ba08f32d2 | callee | fail-to-pass | https://github.com/fastapi/fastapi | b9d912c6380661647b51c322820fcb4ba08f32d2 | function | get_path_param_names | utils.py | def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
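# Editor's illustrative sketch (not part of the gold patch); the path below is
# invented. Both parameter names are extracted, order-insensitively, as a set.
assert get_path_param_names("/users/{user_id}/items/{item_id}") == {
    "user_id",
    "item_id",
}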
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial006_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial006_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial006_py310.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial006_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.64s ===============================
| {'total': 3, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial006_py310.py |
1_fastapi_callee__utils__create_cloned_field__aa84ac8e3e305d76e71c0c1b31237517948fa12f | callee | fail-to-pass | https://github.com/fastapi/fastapi | aa84ac8e3e305d76e71c0c1b31237517948fa12f | function | create_cloned_field | utils.py | def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
        default=None,
        required=False,
        model_config=BaseConfig,
        schema=Schema(None),
    )
    new_field.has_alias = field.has_alias
    new_field.alias = field.alias
    new_field.class_validators = field.class_validators
    new_field.default = field.default
    new_field.required = field.required
    new_field.model_config = field.model_config
    new_field.schema = field.schema
    new_field.allow_none = field.allow_none
    new_field.validate_always = field.validate_always
if field.sub_fields:
        new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
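# Illustrative check (not part of the original fastapi/utils.py): expected
# behaviour of is_body_allowed_for_status_code as defined above; the helper
# name _demo_is_body_allowed is hypothetical.
def _demo_is_body_allowed() -> None:
    assert is_body_allowed_for_status_code(None) is True  # no status code: body allowed
    assert is_body_allowed_for_status_code("default") is True  # OpenAPI patterned field
    assert is_body_allowed_for_status_code("2XX") is True  # OpenAPI range pattern
    assert is_body_allowed_for_status_code(200) is True  # ordinary success code
    assert is_body_allowed_for_status_code(204) is False  # No Content
    assert is_body_allowed_for_status_code("304") is False  # Not Modified, string form
    assert is_body_allowed_for_status_code(101) is False  # informational, < 200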
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
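# Worked example (comment only, not part of the original module):
# get_path_param_names("/users/{user_id}/items/{item_id}") returns
# {"user_id", "item_id"}.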
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
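# Illustrative sketch (comment only, not part of the original module): the
# FastAPIError above points users at response_model=None. A hypothetical path
# operation whose return annotation is not a valid Pydantic field could opt out
# of response-model generation like this:
#
#     from typing import Union
#     from fastapi import FastAPI, Response
#
#     app = FastAPI()
#
#     @app.get("/items/", response_model=None)
#     def read_items() -> Union[Response, dict, None]:
#         return {"ok": True}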
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
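# Worked example (comment only, not part of the original module): assuming a
# route with name "read_item", path_format "/items/{item_id}" and methods
# {"GET"}, re.sub(r"\W", "_", "read_item/items/{item_id}") yields
# "read_item_items__item_id_", so the generated id is
# "read_item_items__item_id__get".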
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
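# Illustrative check (not part of the original module): deep_dict_update merges
# nested dicts in place, concatenates lists, and overwrites scalar values; the
# helper name _demo_deep_dict_update is hypothetical.
def _demo_deep_dict_update() -> None:
    target = {"a": {"x": 1}, "tags": ["one"], "b": 1}
    deep_dict_update(target, {"a": {"y": 2}, "tags": ["two"], "b": 3})
    assert target == {"a": {"x": 1, "y": 2}, "tags": ["one", "two"], "b": 3}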
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
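# Illustrative check (not part of the original module): how get_value_or_default
# resolves DefaultPlaceholder wrappers, assuming DefaultPlaceholder from
# fastapi.datastructures simply wraps a value; the helper name is hypothetical.
def _demo_get_value_or_default() -> None:
    placeholder = DefaultPlaceholder("fallback")
    # A concrete value anywhere in the chain wins over placeholders.
    assert get_value_or_default(placeholder, "explicit") == "explicit"
    # If every item is a placeholder, the highest-priority one is returned.
    assert get_value_or_default(placeholder, DefaultPlaceholder("other")) is placeholder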
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
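# Illustrative note (comment only, not part of the original module): the
# cloned_types cache above is what keeps self-referential models from recursing
# forever. Assuming a Pydantic v1 model such as
#
#     class Node(BaseModel):
#         children: List["Node"] = []
#
# the first create_cloned_field call for a Node field stores the cloned Node
# class in _CLONED_TYPES_CACHE, so the nested call for the "children" sub-field
# reuses that entry instead of cloning Node again. Because the cache is a
# WeakKeyDictionary, entries disappear once the original model class is
# garbage collected.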
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial006_py310.py _
tests/test_tutorial/test_response_model/test_tutorial006_py310.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
fastapi/utils.py:109: in <module>
def create_cloned_field(field: Field) -> Field:
E NameError: name 'Field' is not defined
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial006_py310.py - Nam...
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.65s ===============================
| {'total': 3, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial006_py310.py |
1_fastapi_callee__utils__generate_operation_id_for_path__687065509b047a642acd1d281d6d68c6698509b1 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 687065509b047a642acd1d281d6d68c6698509b1 | function | generate_operation_id_for_path | utils.py | def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial006_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial006_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial006_py310.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial006_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.62s ===============================
| {'total': 3, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial006_py310.py |
1_fastapi_callee__utils__generate_unique_id__8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | function | generate_unique_id | utils.py | def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
| def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
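# Illustrative sketch (comment only, not part of the original module): a typical
# internal call, with Item as a hypothetical Pydantic model; an invalid type_
# would surface as fastapi.exceptions.FastAPIError instead.
#
#     class Item(BaseModel):
#         name: str
#
#     response_field = create_response_field(name="response_item", type_=Item)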
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
    # _cloned_types holds already-cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
    # _cloned_types holds already-cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
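# Hypothetical illustration (not in the original module): every character outside
# [a-zA-Z0-9_] collapses to "_", then the lowercased HTTP method is appended.
# Calling the deprecated helper emits a DeprecationWarning, which is harmless here.
assert (
    generate_operation_id_for_path(name="read_item", path="/items/{item_id}", method="GET")
    == "read_item_items__item_id__get"
)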
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
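# Hypothetical sketch of the docstring above (not part of the original source):
# DefaultPlaceholder (imported at the top of this module) wraps a fallback value,
# and the first explicitly set, non-placeholder argument wins.
_fallback = DefaultPlaceholder(["application/json"])
assert get_value_or_default(_fallback, DefaultPlaceholder(None), "text/html") == "text/html"
assert get_value_or_default(_fallback, DefaultPlaceholder(None)) is _fallback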
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial006_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial006_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial006_py310.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial006_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.62s ===============================
| {'total': 3, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial006_py310.py |
1_fastapi_callee__utils__deep_dict_update__181a32236a32305788b87d10e7990d9564ae2056 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 181a32236a32305788b87d10e7990d9564ae2056 | function | deep_dict_update | utils.py | def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
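# Hypothetical sketch of the updated behaviour above (not in the original source):
# nested dicts merge recursively, and lists under the same key are now concatenated
# instead of replaced, which is the change introduced by this version.
main = {"servers": [{"url": "/v1"}], "info": {"title": "API", "version": "1"}}
update = {"servers": [{"url": "/v2"}], "info": {"version": "2"}}
deep_dict_update(main, update)
assert main == {
    "servers": [{"url": "/v1"}, {"url": "/v2"}],
    "info": {"title": "API", "version": "2"},
}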
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial006_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial006_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial006_py310.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial006_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.61s ===============================
| {'total': 3, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial006_py310.py |
1_fastapi_callee__utils__is_body_allowed_for_status_code__c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | callee | fail-to-pass | https://github.com/fastapi/fastapi | c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | function | is_body_allowed_for_status_code | utils.py | def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
| def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo()
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
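# A short sketch of the operation-id scheme above, assuming this checkout's
# FastAPI is importable and generate_unique_id (defined above) is in scope; the
# route and handler names are made up for the example.
from fastapi import FastAPI
from fastapi.routing import APIRoute

app = FastAPI()

@app.get("/items/{item_id}")
def read_item(item_id: int):
    return {"item_id": item_id}

route = next(r for r in app.routes if isinstance(r, APIRoute))
assert generate_unique_id(route) == "read_item_items__item_id__get"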
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
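# A quick sketch of the merge semantics above (sample dicts are made up): nested
# dicts are merged recursively, lists are concatenated, and other values are
# overwritten in place.
openapi_extra = {"responses": {"200": {"description": "OK"}}, "tags": ["items"]}
override = {"responses": {"200": {"description": "Item found"}, "404": {}}, "tags": ["public"]}
deep_dict_update(openapi_extra, override)
assert openapi_extra == {
    "responses": {"200": {"description": "Item found"}, "404": {}},
    "tags": ["items", "public"],
}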
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
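# A small sketch of the priority rule in the docstring above, assuming
# DefaultPlaceholder (imported by this module) and get_value_or_default are in
# scope: the first explicit, non-placeholder value wins, otherwise the first
# placeholder is returned unchanged.
app_level = DefaultPlaceholder("app-level default")
assert get_value_or_default(app_level, "router override") == "router override"
assert get_value_or_default(app_level, DefaultPlaceholder("other")) is app_level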
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py:2: in <module>
from fastapi.exceptions import FastAPIError
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.62s ===============================
| {'total': 1, 'passed': 1, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py |
1_fastapi_callee__utils__get_path_param_names__b9d912c6380661647b51c322820fcb4ba08f32d2 | callee | fail-to-pass | https://github.com/fastapi/fastapi | b9d912c6380661647b51c322820fcb4ba08f32d2 | function | get_path_param_names | utils.py | def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
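# A brief sketch comparing the two variants shown in this row on a made-up path
# template; both return the same parameter names, the capture-group version
# simply avoids the extra strip("{}") step.
import re

sample_path = "/users/{user_id}/items/{item_id}"
old_impl = {item.strip("{}") for item in re.findall("{[^}]*}", sample_path)}
new_impl = set(re.findall("{(.*?)}", sample_path))
assert old_impl == new_impl == {"user_id", "item_id"}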
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py:2: in <module>
from fastapi.exceptions import FastAPIError
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.62s ===============================
| {'total': 1, 'passed': 1, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py |
1_fastapi_callee__utils__create_cloned_field__aa84ac8e3e305d76e71c0c1b31237517948fa12f | callee | fail-to-pass | https://github.com/fastapi/fastapi | aa84ac8e3e305d76e71c0c1b31237517948fa12f | function | create_cloned_field | utils.py | def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
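# A minimal sketch of the version split above, assuming PYDANTIC_V2 and
# create_model_field from the same module are in scope: with Pydantic v2 the
# function is a passthrough (no cloning is needed), while under v1 it rebuilds
# the field around a cloned model class.
item_field = create_model_field(name="item", type_=dict)
if PYDANTIC_V2:
    # Pydantic v2: the very same field object comes back.
    assert create_cloned_field(item_field) is item_field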
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
default = None,
required = False,
model_config = BaseConfig,
schema = Schema(None),
)
    new_field.has_alias = field.has_alias
    new_field.alias = field.alias
    new_field.class_validators = field.class_validators
    new_field.default = field.default
    new_field.required = field.required
    new_field.model_config = field.model_config
    new_field.schema = field.schema
    new_field.allow_none = field.allow_none
    new_field.validate_always = field.validate_always
    if field.sub_fields:
        new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py _
tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py:2: in <module>
from fastapi.exceptions import FastAPIError
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
fastapi/utils.py:109: in <module>
def create_cloned_field(field: Field) -> Field:
E NameError: name 'Field' is not defined
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py - ...
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.61s ===============================
| {'total': 1, 'passed': 1, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py |
1_fastapi_callee__utils__generate_operation_id_for_path__687065509b047a642acd1d281d6d68c6698509b1 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 687065509b047a642acd1d281d6d68c6698509b1 | function | generate_operation_id_for_path | utils.py | def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
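# A short sketch comparing the two variants in this row on a made-up route: the
# str.replace version and the regex version produce the same id for typical
# paths built from names, slashes and {params}; they only diverge for paths
# containing other non-word characters (e.g. "-" or "."), which only the regex
# version rewrites to "_".
import re

name, path, method = "read_item", "/items/{item_id}", "GET"
replace_based = (name + path).replace("{", "_").replace("}", "_").replace("/", "_") + "_" + method.lower()
regex_based = re.sub(r"\W", "_", f"{name}{path}") + "_" + method.lower()
assert replace_based == regex_based == "read_item_items__item_id__get"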
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
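# Hedged illustration (the _ExampleItem model is made up): under Pydantic v1 the cloned
# field's type is a fresh subclass of the original model, so the clone can be adjusted
# independently of the user's class; under Pydantic v2 the field is returned unchanged,
# as the early `if PYDANTIC_V2: return field` branch above shows.
class _ExampleItem(BaseModel):
    name: str

_example_field = create_model_field(name="item", type_=_ExampleItem)
_example_clone = create_cloned_field(_example_field)
if PYDANTIC_V2:
    assert _example_clone is _example_field
else:
    assert issubclass(_example_clone.type_, _ExampleItem)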
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
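# Hedged usage sketch (the values are made up): the first argument not wrapped in
# DefaultPlaceholder wins; if every argument is a placeholder, the first placeholder is
# returned. FastAPI uses this pattern, for example, to resolve a route-level
# response_class against a router-level default.
_router_default = DefaultPlaceholder("router-default")
assert get_value_or_default("route-override", _router_default) == "route-override"
assert get_value_or_default(_router_default, "app-override") == "app-override"
assert get_value_or_default(_router_default, DefaultPlaceholder("app-default")) is _router_default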
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
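# Hedged usage sketch (not part of the original module): these expectations follow directly
# from the implementation above; a body is allowed unless the code is informational (< 200)
# or one of 204, 205, 304, while the OpenAPI patterned keys always allow one.
assert is_body_allowed_for_status_code(None) is True
assert is_body_allowed_for_status_code("default") is True
assert is_body_allowed_for_status_code("2XX") is True
assert is_body_allowed_for_status_code(200) is True
assert is_body_allowed_for_status_code("101") is False
assert is_body_allowed_for_status_code(204) is False
assert is_body_allowed_for_status_code(304) is False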
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
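# Hedged illustration (the route attributes are made up): for a route with name "read_item",
# path_format "/items/{item_id}" and methods {"GET"}, every non-word character is replaced
# by "_" and the lowercased method is appended, producing:
#     "read_item_items__item_id__get"
# This string is what FastAPI uses as the default OpenAPI operationId for the route.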
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
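# Hedged illustration (the dicts are made up): nested dicts are merged recursively, lists
# are concatenated, and any other value is overwritten in place.
_main = {"info": {"title": "API"}, "tags": [{"name": "a"}], "version": "1"}
deep_dict_update(_main, {"info": {"description": "demo"}, "tags": [{"name": "b"}], "version": "2"})
assert _main == {
    "info": {"title": "API", "description": "demo"},
    "tags": [{"name": "a"}, {"name": "b"}],
    "version": "2",
}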
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py:2: in <module>
from fastapi.exceptions import FastAPIError
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.62s ===============================
| {'total': 1, 'passed': 1, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py |
1_fastapi_callee__utils__generate_unique_id__8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | function | generate_unique_id | utils.py | def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
| def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py:2: in <module>
from fastapi.exceptions import FastAPIError
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.61s ===============================
| {'total': 1, 'passed': 1, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py |
1_fastapi_callee__utils__deep_dict_update__181a32236a32305788b87d10e7990d9564ae2056 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 181a32236a32305788b87d10e7990d9564ae2056 | function | deep_dict_update | utils.py | def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
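# Editorial illustration (not part of the captured source), assuming the module is
# importable as-is: a valid annotation yields a compat ModelField, while an
# annotation Pydantic cannot process would raise the FastAPIError built above.
# The variable name is only an example.
example_field = create_model_field(name="item_id", type_=int)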
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
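# Editorial illustration (not part of the captured source): the same r"\W"
# substitution used by generate_unique_id above, applied to a made-up route
# name and path format.
assert re.sub(r"\W", "_", "read_item/items/{item_id}") + "_get" == "read_item_items__item_id__get"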
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
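# Editorial illustration (not part of the captured source): nested dicts merge
# recursively, lists under the same key concatenate, and scalars overwrite.
merged = {"responses": {"404": {"description": "Not found"}}, "tags": ["items"]}
deep_dict_update(merged, {"responses": {"500": {"description": "Server error"}}, "tags": ["users"]})
assert merged["responses"] == {"404": {"description": "Not found"}, "500": {"description": "Server error"}}
assert merged["tags"] == ["items", "users"]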
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
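# Editorial illustration (not part of the captured source): the first argument
# that is not a DefaultPlaceholder wins; otherwise the first placeholder is kept.
# The two sentinel classes are made up for the sketch.
class _Default: ...
class _Override: ...
_placeholder = DefaultPlaceholder(_Default)
assert get_value_or_default(_placeholder, _Override) is _Override
assert get_value_or_default(_placeholder, DefaultPlaceholder(_Override)) is _placeholder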
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py:2: in <module>
from fastapi.exceptions import FastAPIError
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.61s ===============================
| {'total': 1, 'passed': 1, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial003_04_py310.py |
1_fastapi_callee__utils__is_body_allowed_for_status_code__c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | callee | fail-to-pass | https://github.com/fastapi/fastapi | c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | function | is_body_allowed_for_status_code | utils.py | def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
| def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo()
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py:2: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.62s ===============================
| {'total': 3, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py |
1_fastapi_callee__utils__get_path_param_names__b9d912c6380661647b51c322820fcb4ba08f32d2 | callee | fail-to-pass | https://github.com/fastapi/fastapi | b9d912c6380661647b51c322820fcb4ba08f32d2 | function | get_path_param_names | utils.py | def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
# type: ignore[attr-defined]
new_field.class_validators = field.class_validators
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py:2: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.62s ===============================
| {'total': 3, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py |
1_fastapi_callee__utils__create_cloned_field__aa84ac8e3e305d76e71c0c1b31237517948fa12f | callee | fail-to-pass | https://github.com/fastapi/fastapi | aa84ac8e3e305d76e71c0c1b31237517948fa12f | function | create_cloned_field | utils.py | def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
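# Hedged usage sketch (illustrative only): the regex above pulls every "{...}"
# segment out of a path template and returns the bare parameter names.
if __name__ == "__main__":  # pragma: no cover
    assert get_path_param_names("/users/{user_id}/items/{item_id}") == {
        "user_id",
        "item_id",
    }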
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
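# Hedged usage sketch (illustrative only, assuming the Pydantic version pinned
# for this row): build a ModelField for a plain ``int`` annotation; invalid
# annotations instead surface as the FastAPIError raised in the except branch
# above. ``total_count`` is a hypothetical field name used only here.
if __name__ == "__main__":  # pragma: no cover
    total_field = create_model_field(name="total_count", type_=int)
    print(total_field.name)  # -> total_count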
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
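# Hedged illustration (not calling generate_unique_id directly, since that
# requires a real APIRoute): the same two-step transformation applied to a
# hypothetical route named "read_item" mounted at "/items/{item_id}" via GET.
if __name__ == "__main__":  # pragma: no cover
    raw = "read_item" + "/items/{item_id}"
    assert re.sub(r"\W", "_", raw) + "_get" == "read_item_items__item_id__get"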
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
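# Hedged usage sketch (illustrative only): nested dicts are merged key by key,
# lists are concatenated, and scalar values are overwritten in place.
if __name__ == "__main__":  # pragma: no cover
    main = {"info": {"title": "API"}, "tags": ["a"], "version": "1"}
    update = {"info": {"description": "demo"}, "tags": ["b"], "version": "2"}
    deep_dict_update(main, update)
    assert main == {
        "info": {"title": "API", "description": "demo"},
        "tags": ["a", "b"],
        "version": "2",
    }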
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
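# Hedged usage sketch (illustrative only, reusing the DefaultPlaceholder class
# imported above): the first concrete, non-placeholder value wins; if every
# candidate is a placeholder, the first placeholder is returned unchanged.
if __name__ == "__main__":  # pragma: no cover
    placeholder = DefaultPlaceholder("fallback")
    assert get_value_or_default(placeholder, "explicit") == "explicit"
    assert get_value_or_default(placeholder, DefaultPlaceholder("x")) is placeholder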
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
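# Hedged usage sketch (illustrative only): under Pydantic v2 the clone is a
# no-op by design, so the original ModelField instance comes back untouched;
# the WeakKeyDictionary cache above is only consulted on the v1 code path.
if __name__ == "__main__" and PYDANTIC_V2:  # pragma: no cover
    original = create_model_field(name="total_count", type_=int)
    assert create_cloned_field(original) is original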
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py _
tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py:2: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
fastapi/utils.py:109: in <module>
def create_cloned_field(field: Field) -> Field:
E NameError: name 'Field' is not defined
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py - ...
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.61s ===============================
| {'total': 3, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py |
1_fastapi_callee__utils__generate_operation_id_for_path__687065509b047a642acd1d281d6d68c6698509b1 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 687065509b047a642acd1d281d6d68c6698509b1 | function | generate_operation_id_for_path | utils.py | def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py:2: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.62s ===============================
| {'total': 3, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py |
1_fastapi_callee__utils__generate_unique_id__8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | function | generate_unique_id | utils.py | def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
| def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
    # cloned_types caches already-cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
    # cloned_types caches already-cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
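# Illustrative sketch (added, not part of the original module), assuming
# fastapi.datastructures.Default is importable alongside DefaultPlaceholder:
# get_value_or_default returns the first argument that is not a DefaultPlaceholder,
# otherwise it falls back to the first placeholder.
from fastapi.datastructures import Default
assert get_value_or_default(Default("fallback"), "explicit", "other") == "explicit"
placeholder = get_value_or_default(Default("fallback"), Default("other"))
assert isinstance(placeholder, DefaultPlaceholder) and placeholder.value == "fallback"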
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py:2: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.61s ===============================
| {'total': 3, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py |
1_fastapi_callee__utils__deep_dict_update__181a32236a32305788b87d10e7990d9564ae2056 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 181a32236a32305788b87d10e7990d9564ae2056 | function | deep_dict_update | utils.py | def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
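# Illustrative check (added, not part of the dataset record): the updated deep_dict_update
# still merges nested dicts recursively, now concatenates lists instead of replacing them,
# and overwrites scalar values as before.
main = {"info": {"title": "API"}, "tags": [{"name": "a"}], "version": "1"}
patch = {"info": {"description": "demo"}, "tags": [{"name": "b"}], "version": "2"}
deep_dict_update(main, patch)
assert main == {
    "info": {"title": "API", "description": "demo"},
    "tags": [{"name": "a"}, {"name": "b"}],
    "version": "2",
}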
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
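# Illustrative sketch (added, not part of the original module): create_model_field hides the
# Pydantic v1/v2 differences, so a caller can build a ModelField from a plain annotation
# without branching on PYDANTIC_V2; invalid annotations surface as FastAPIError as raised above.
item_id_field = create_model_field(name="item_id", type_=int)
assert item_id_field.name == "item_id"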
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py:2: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.63s ===============================
| {'total': 3, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial003_05_py310.py |
1_fastapi_callee__utils__is_body_allowed_for_status_code__c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | callee | fail-to-pass | https://github.com/fastapi/fastapi | c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | function | is_body_allowed_for_status_code | utils.py | def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
| def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
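# Illustrative check (added, not part of the dataset record): compared with the pre-patch
# version above, the gold version accepts the OpenAPI range keys (where int("default") or
# int("2XX") would previously raise ValueError) and additionally denies a body for 205.
assert is_body_allowed_for_status_code(None) is True
assert is_body_allowed_for_status_code("default") is True
assert is_body_allowed_for_status_code("2XX") is True
assert is_body_allowed_for_status_code(200) is True
assert is_body_allowed_for_status_code(100) is False   # informational
assert is_body_allowed_for_status_code(204) is False   # No Content
assert is_body_allowed_for_status_code(205) is False   # Reset Content, newly excluded
assert is_body_allowed_for_status_code(304) is False   # Not Modified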
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo()
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial004_py39.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial004_py39.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial004_py39.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial004_py39.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.65s ===============================
| {'total': 4, 'passed': 4, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial004_py39.py |
1_fastapi_callee__utils__get_path_param_names__b9d912c6380661647b51c322820fcb4ba08f32d2 | callee | fail-to-pass | https://github.com/fastapi/fastapi | b9d912c6380661647b51c322820fcb4ba08f32d2 | function | get_path_param_names | utils.py | def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
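# Illustrative check (added, not part of the dataset record): on ordinary path templates the
# original set comprehension and the gold regex with a capture group agree.
assert get_path_param_names("/users/{user_id}/items/{item_id}") == {"user_id", "item_id"}
assert get_path_param_names("/health") == set()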
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
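# A minimal sketch of the rule above: informational responses, 204, 205 and 304
# never carry a body, while OpenAPI range patterns and "default" always may.
assert is_body_allowed_for_status_code(None) is True
assert is_body_allowed_for_status_code("default") is True
assert is_body_allowed_for_status_code("2XX") is True
assert is_body_allowed_for_status_code(200) is True
assert is_body_allowed_for_status_code(101) is False
assert is_body_allowed_for_status_code(204) is False
assert is_body_allowed_for_status_code(304) is False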
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
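# A minimal sketch, assuming create_model_field as defined elsewhere in this
# module: under Pydantic v2 the clone is a no-op and the same ModelField comes
# back; only under v1 does the recursive cloning above build a throwaway
# subclass so response filtering cannot mutate the original model's fields.
sketch_field = create_model_field(name="price", type_=float)
if PYDANTIC_V2:
    assert create_cloned_field(sketch_field) is sketch_field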
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
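# A minimal sketch with a stand-in object exposing the attributes that
# generate_unique_id reads (name, path_format, methods); a real APIRoute would
# normally be passed here.
from types import SimpleNamespace

fake_route = SimpleNamespace(
    name="read_item", path_format="/items/{item_id}", methods={"GET"}
)
assert generate_unique_id(fake_route) == "read_item_items__item_id__get"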
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
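# A minimal sketch of the in-place merge above: nested dicts are merged
# recursively, lists are concatenated, and any other value is overwritten.
main = {"info": {"title": "API"}, "tags": ["a"], "version": 1}
deep_dict_update(main, {"info": {"version": "2.0"}, "tags": ["b"], "version": 2})
assert main == {
    "info": {"title": "API", "version": "2.0"},
    "tags": ["a", "b"],
    "version": 2,
}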
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
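# A minimal sketch: the first argument that is not a DefaultPlaceholder wins,
# and if every argument is a placeholder the first placeholder is returned.
from fastapi.responses import HTMLResponse, JSONResponse

placeholder = DefaultPlaceholder(JSONResponse)
assert get_value_or_default(placeholder, HTMLResponse) is HTMLResponse
assert get_value_or_default(placeholder, DefaultPlaceholder(HTMLResponse)) is placeholder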
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
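# A quick check of the regex above, which captures everything between each pair
# of braces in the path template.
assert get_path_param_names("/users/{user_id}/items/{item_id}") == {"user_id", "item_id"}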
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
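# A minimal sketch: a plain annotation builds a field on either Pydantic major
# version, while an annotation Pydantic cannot handle surfaces as the
# FastAPIError raised above.
price_field = create_model_field(name="price", type_=float)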
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial004_py39.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial004_py39.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial004_py39.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial004_py39.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.64s ===============================
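# A hedged note on the collection error above: fastapi/dependencies/utils.py in
# this checkout still imports `create_response_field`, while the rewritten
# fastapi/utils.py only defines `create_model_field`. An alias along these lines
# (an assumption, not the repo's actual fix) would resolve this ImportError:
create_response_field = create_model_field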
| {'total': 4, 'passed': 4, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial004_py39.py |
1_fastapi_callee__utils__create_cloned_field__aa84ac8e3e305d76e71c0c1b31237517948fa12f | callee | fail-to-pass | https://github.com/fastapi/fastapi | aa84ac8e3e305d76e71c0c1b31237517948fa12f | function | create_cloned_field | utils.py | def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
        default=None,
        required=False,
        model_config=BaseConfig,
        schema=Schema(None),
    )
    new_field.has_alias = field.has_alias
    new_field.alias = field.alias
    new_field.class_validators = field.class_validators
    new_field.default = field.default
    new_field.required = field.required
    new_field.model_config = field.model_config
    new_field.schema = field.schema
    new_field.allow_none = field.allow_none
    new_field.validate_always = field.validate_always
    if field.sub_fields:
        new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial004_py39.py _
tests/test_tutorial/test_response_model/test_tutorial004_py39.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
fastapi/utils.py:109: in <module>
def create_cloned_field(field: Field) -> Field:
E NameError: name 'Field' is not defined
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial004_py39.py - Name...
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.63s ===============================
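# A hedged note on the NameError above: the v1-era code annotates
# create_cloned_field with pydantic's `Field`, which the rewritten module never
# imports. Against the compat layer the signature would instead look roughly
# like this sketch (not the repo's actual code):
from fastapi._compat import ModelField

def create_cloned_field(field: ModelField) -> ModelField:
    ...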
| {'total': 4, 'passed': 4, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial004_py39.py |
1_fastapi_callee__utils__generate_operation_id_for_path__687065509b047a642acd1d281d6d68c6698509b1 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 687065509b047a642acd1d281d6d68c6698509b1 | function | generate_operation_id_for_path | utils.py | def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
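# A quick check of the replace-based implementation above: only "{", "}" and "/"
# are rewritten (unlike the later re.sub(r"\W", "_", ...) version), but for a
# typical route it yields the same operation id.
assert (
    generate_operation_id_for_path(name="read_item", path="/items/{item_id}", method="GET")
    == "read_item_items__item_id__get"
)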
| def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial004_py39.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial004_py39.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial004_py39.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial004_py39.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.61s ===============================
| {'total': 4, 'passed': 4, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial004_py39.py |
1_fastapi_callee__utils__generate_unique_id__8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | function | generate_unique_id | utils.py | def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
| def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
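Illustration (a hedged sketch, not repository data): `generate_unique_id` only reads `route.name`, `route.path_format`, and `route.methods`, so a duck-typed stand-in is enough to show the sanitization. Note that `\W` is Unicode-aware in Python 3, so non-ASCII word characters survive where the older `[^0-9a-zA-Z_]` pattern replaced them.

# Sketch only, assuming an installed FastAPI whose generate_unique_id matches the code above.
import re
from types import SimpleNamespace

from fastapi.utils import generate_unique_id

route = SimpleNamespace(name="read_item", path_format="/items/{item_id}", methods={"GET"})
print(generate_unique_id(route))            # read_item_items__item_id__get

# Difference between the old and new sanitization patterns for non-ASCII input:
print(re.sub(r"\W", "_", "café"))           # café  ('é' counts as a word character)
print(re.sub("[^0-9a-zA-Z_]", "_", "café")) # caf_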
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]

new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial004_py39.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial004_py39.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial004_py39.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial004_py39.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.64s ===============================
| {'total': 4, 'passed': 4, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial004_py39.py |
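The collection error in the log above occurs because fastapi/dependencies/utils.py still imports create_response_field while the utils.py under test defines create_model_field instead. A hedged sketch of the renamed helper's behaviour (illustration only, assuming a checkout matching the new context shown in these rows):

# Sketch only: create_model_field builds a ModelField for valid Pydantic types and
# raises FastAPIError otherwise; Union[Response, dict, None] is the invalid example
# cited in the function's own error message.
from typing import Union

from fastapi import Response
from fastapi.exceptions import FastAPIError
from fastapi.utils import create_model_field  # name used in the new context above

price_field = create_model_field(name="price", type_=float)
print(price_field.name)  # price

try:
    create_model_field(name="resp", type_=Union[Response, dict, None])
except FastAPIError as exc:
    print("rejected:", exc)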
1_fastapi_callee__utils__deep_dict_update__181a32236a32305788b87d10e7990d9564ae2056 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 181a32236a32305788b87d10e7990d9564ae2056 | function | deep_dict_update | utils.py | def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
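A runnable sketch of the behaviour above (illustration only; the function is duplicated so the snippet is self-contained): nested dicts are merged in place and lists are concatenated rather than replaced.

from typing import Any, Dict

def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
    # Same logic as the updated function above.
    for key, value in update_dict.items():
        if (
            key in main_dict
            and isinstance(main_dict[key], dict)
            and isinstance(value, dict)
        ):
            deep_dict_update(main_dict[key], value)
        elif (
            key in main_dict
            and isinstance(main_dict[key], list)
            and isinstance(update_dict[key], list)
        ):
            main_dict[key] = main_dict[key] + update_dict[key]
        else:
            main_dict[key] = value

openapi = {"info": {"title": "API"}, "tags": [{"name": "users"}]}
extra = {"info": {"version": "1.0"}, "tags": [{"name": "items"}], "servers": ["/v1"]}
deep_dict_update(openapi, extra)
# openapi == {"info": {"title": "API", "version": "1.0"},
#             "tags": [{"name": "users"}, {"name": "items"}],
#             "servers": ["/v1"]}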
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial004_py39.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial004_py39.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial004_py39.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial004_py39.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.72s ===============================
| {'total': 4, 'passed': 4, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial004_py39.py |
1_fastapi_callee__utils__is_body_allowed_for_status_code__c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | callee | fail-to-pass | https://github.com/fastapi/fastapi | c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | function | is_body_allowed_for_status_code | utils.py | def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
| def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
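Illustration (a hedged sketch, not repository data), assuming an installed FastAPI whose fastapi.utils.is_body_allowed_for_status_code matches the updated code above:

# Sketch only: None and the OpenAPI patterned fields allow a body; informational
# responses and 204/205/304 do not.
from fastapi.utils import is_body_allowed_for_status_code

assert is_body_allowed_for_status_code(None) is True       # no status code declared
assert is_body_allowed_for_status_code("default") is True  # OpenAPI patterned field
assert is_body_allowed_for_status_code("2XX") is True
assert is_body_allowed_for_status_code(200) is True
assert is_body_allowed_for_status_code(101) is False       # informational response
assert is_body_allowed_for_status_code(204) is False       # No Content
assert is_body_allowed_for_status_code("304") is False     # Not Modified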
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
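# Illustrative sketch (not part of the original file) of get_value_or_default above,
# using FastAPI's DefaultPlaceholder and two stock response classes as assumed inputs.
from fastapi.datastructures import DefaultPlaceholder
from fastapi.responses import JSONResponse, PlainTextResponse

router_default = DefaultPlaceholder(JSONResponse)  # "no explicit choice was made"
explicit_choice = PlainTextResponse                # a concrete, user-provided value

# The first non-placeholder wins.
assert get_value_or_default(router_default, explicit_choice) is explicit_choice
# If everything is a placeholder, the first placeholder itself is returned.
assert get_value_or_default(router_default) is router_default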
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
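# Illustrative sketch (not part of the original file, assuming pydantic v1 as in this
# context): get_model_definitions above turns flat models into OpenAPI component
# schemas keyed by the names in model_name_map, with nested refs under REF_PREFIX.
from pydantic import BaseModel

class Item(BaseModel):
    name: str
    price: float

definitions = get_model_definitions(
    flat_models={Item},
    model_name_map={Item: "Item"},
)
assert definitions["Item"]["properties"]["name"]["type"] == "string"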
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo()
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
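# Illustrative sketch (not part of the original file, assuming pydantic v1 as in this
# context): create_response_field above wraps ModelField construction; an invalid
# type surfaces as FastAPIError with the hint text shown.
from pydantic import BaseModel

class Item(BaseModel):
    name: str

field = create_response_field(name="response_item", type_=Item)
assert field.name == "response_item"
assert field.type_ is Item
assert field.required is False  # default passed through by create_response_field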
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
    # cloned_types caches already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
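# Standard-library sketch (not part of the original file) of why a WeakKeyDictionary
# fits this cache: entries vanish once the keyed class has no other strong references,
# so caching cloned models cannot keep the originals alive.
import gc
from weakref import WeakKeyDictionary

_sketch_cache: "WeakKeyDictionary[type, str]" = WeakKeyDictionary()

class TransientModel:  # stand-in for a dynamically created model class
    pass

_sketch_cache[TransientModel] = "cloned-type-placeholder"
assert len(_sketch_cache) == 1
del TransientModel  # drop the only strong reference
gc.collect()        # classes sit in reference cycles, so collect explicitly
assert len(_sketch_cache) == 0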
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
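# Illustrative sketch (not part of the original file): the version branch above lets
# the same call site work on pydantic v1 and v2; only the kwargs given to ModelField
# differ. The Item model here is a hypothetical example input.
from pydantic import BaseModel

class Item(BaseModel):
    name: str

field = create_model_field(name="response_item", type_=Item)
assert field.name == "response_item"
# An annotation pydantic cannot model raises FastAPIError with the hint text above.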
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
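# Sketch (not part of the original file) of the cloning idiom used by the pydantic v1
# branch above: create_model(..., __base__=...) yields a fresh subclass with the same
# name, whose __fields__ entries the code then swaps for cloned sub-fields.
from pydantic import BaseModel, create_model

class Item(BaseModel):
    name: str
    price: float

ClonedItem = create_model(Item.__name__, __base__=Item)
assert ClonedItem is not Item
assert issubclass(ClonedItem, Item)
assert ClonedItem.__name__ == "Item"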
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
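# Illustrative sketch (not part of the original file) of the id scheme above: every
# non-word character of "<route name><path format>" becomes "_", then the lowercased
# HTTP method is appended.
import re

name, path_format, method = "read_item", "/items/{item_id}", "GET"
operation_id = re.sub(r"\W", "_", f"{name}{path_format}")
operation_id = f"{operation_id}_{method.lower()}"
assert operation_id == "read_item_items__item_id__get"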
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
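# Illustrative sketch (not part of the original file) of deep_dict_update above:
# nested dicts merge recursively, matching lists concatenate, other values overwrite.
main = {"info": {"title": "API"}, "tags": [{"name": "items"}], "version": "1"}
update = {"info": {"description": "demo"}, "tags": [{"name": "users"}], "version": "2"}
deep_dict_update(main, update)
assert main == {
    "info": {"title": "API", "description": "demo"},
    "tags": [{"name": "items"}, {"name": "users"}],
    "version": "2",
}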
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial004_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial004_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial004_py310.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial004_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.69s ===============================
| {'total': 4, 'passed': 4, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial004_py310.py |
1_fastapi_callee__utils__get_path_param_names__b9d912c6380661647b51c322820fcb4ba08f32d2 | callee | fail-to-pass | https://github.com/fastapi/fastapi | b9d912c6380661647b51c322820fcb4ba08f32d2 | function | get_path_param_names | utils.py | def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
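# Illustrative check (not part of the original file): the old strip-based version and
# the gold capture-group version above agree on typical paths.
import re

path = "/users/{user_id}/items/{item_id}"
old_result = {item.strip("{}") for item in re.findall("{[^}]*}", path)}
new_result = set(re.findall("{(.*?)}", path))
assert old_result == new_result == {"user_id", "item_id"}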
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial004_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial004_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial004_py310.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial004_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.67s ===============================
| {'total': 4, 'passed': 4, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial004_py310.py |
1_fastapi_callee__utils__create_cloned_field__aa84ac8e3e305d76e71c0c1b31237517948fa12f | callee | fail-to-pass | https://github.com/fastapi/fastapi | aa84ac8e3e305d76e71c0c1b31237517948fa12f | function | create_cloned_field | utils.py | def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
        default=None,
        required=False,
        model_config=BaseConfig,
        schema=Schema(None),
    )
    new_field.has_alias = field.has_alias
    new_field.alias = field.alias
    new_field.class_validators = field.class_validators
    new_field.default = field.default
    new_field.required = field.required
    new_field.model_config = field.model_config
    new_field.schema = field.schema
    new_field.allow_none = field.allow_none
    new_field.validate_always = field.validate_always
    if field.sub_fields:
        new_field.sub_fields = [
            create_cloned_field(sub_field) for sub_field in field.sub_fields
        ]
    if field.key_field:
        new_field.key_field = create_cloned_field(field.key_field)
    new_field.validators = field.validators
    new_field.whole_pre_validators = field.whole_pre_validators
    new_field.whole_post_validators = field.whole_post_validators
    new_field.parse_json = field.parse_json
    new_field.shape = field.shape
    new_field._populate_validators()
    return new_field
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
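# Editor's illustration (not part of the original source): both generators above
# collapse every non-word character to "_". Assuming a hypothetical route named
# "read_item", mounted at "/items/{item_id}" and handling GET, the id becomes:
import re
_example_id = re.sub(r"\W", "_", "read_item" + "/items/{item_id}") + "_get"
assert _example_id == "read_item_items__item_id__get"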
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
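# Editor's illustration (not part of the original source): nested dict values are
# merged recursively rather than replaced wholesale.
_openapi_extra = {"responses": {"404": {"description": "Not found"}}}
deep_dict_update(_openapi_extra, {"responses": {"500": {"description": "Server error"}}})
assert _openapi_extra == {
    "responses": {
        "404": {"description": "Not found"},
        "500": {"description": "Server error"},
    }
}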
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
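# Editor's sketch (not part of the original source): DefaultPlaceholder values are
# skipped in favour of the first explicit value; the content-type strings here are
# only hypothetical placeholders.
from fastapi.datastructures import DefaultPlaceholder
_fallback = DefaultPlaceholder("application/json")
assert get_value_or_default(_fallback, "text/html") == "text/html"
assert get_value_or_default(_fallback, DefaultPlaceholder("text/html")) is _fallback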
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial004_py310.py _
tests/test_tutorial/test_response_model/test_tutorial004_py310.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
fastapi/utils.py:109: in <module>
def create_cloned_field(field: Field) -> Field:
E NameError: name 'Field' is not defined
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial004_py310.py - Nam...
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.67s ===============================
| {'total': 4, 'passed': 4, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial004_py310.py |
1_fastapi_callee__utils__generate_operation_id_for_path__687065509b047a642acd1d281d6d68c6698509b1 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 687065509b047a642acd1d281d6d68c6698509b1 | function | generate_operation_id_for_path | utils.py | def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
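# Editor's illustration (not part of the original source): informational responses
# and 204/205/304 must not carry a body, while OpenAPI range patterns always may.
assert is_body_allowed_for_status_code(200) is True
assert is_body_allowed_for_status_code("2XX") is True
assert is_body_allowed_for_status_code(None) is True
assert is_body_allowed_for_status_code(101) is False
assert is_body_allowed_for_status_code(204) is False
assert is_body_allowed_for_status_code(304) is False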
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
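# Editor's illustration (not part of the original source): only the parameter names
# between curly braces are extracted, regardless of position.
assert get_path_param_names("/users/{user_id}/items/{item_id}") == {"user_id", "item_id"}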
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
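# Editor's sketch (not part of the original source): a minimal call, assuming either
# pydantic version is installed; invalid annotations surface as the FastAPIError
# explained in the message above instead of a raw pydantic exception.
_status_field = create_model_field(name="status", type_=str)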
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial004_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial004_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial004_py310.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial004_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.66s ===============================
| {'total': 4, 'passed': 4, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial004_py310.py |
1_fastapi_callee__utils__generate_unique_id__8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | function | generate_unique_id | utils.py | def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
| def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
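# Editor's note as code (not part of the original source): the updated r"\W" pattern
# is Unicode-aware, whereas the earlier "[^0-9a-zA-Z_]" class also replaced
# non-ASCII word characters such as "é" in this hypothetical route name:
import re
assert re.sub(r"\W", "_", "café_items") == "café_items"
assert re.sub("[^0-9a-zA-Z_]", "_", "café_items") == "caf__items"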
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial004_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial004_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial004_py310.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial004_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.73s ===============================
| {'total': 4, 'passed': 4, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial004_py310.py |
1_fastapi_callee__utils__deep_dict_update__181a32236a32305788b87d10e7990d9564ae2056 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 181a32236a32305788b87d10e7990d9564ae2056 | function | deep_dict_update | utils.py | def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
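# Editor's illustration (not part of the original source): matching list values are
# now concatenated instead of overwritten, so earlier entries survive the update.
_merged = {"tags": ["items"]}
deep_dict_update(_merged, {"tags": ["admin"]})
assert _merged == {"tags": ["items", "admin"]}  # the previous version kept only ["admin"]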
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
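# --- Added illustration (not part of the original dataset field) -----------
# get_value_or_default() walks its arguments in priority order and returns
# the first one that is not a DefaultPlaceholder, falling back to the first
# argument otherwise. DefaultPlaceholder is the class imported above from
# fastapi.datastructures; the helper below is purely illustrative and is not
# defined anywhere in FastAPI itself.
def _demo_get_value_or_default() -> None:
    fallback = DefaultPlaceholder("fallback")
    # The first concrete (non-placeholder) value wins ...
    assert get_value_or_default(fallback, DefaultPlaceholder(0), 42) == 42
    # ... and if every argument is a placeholder, the first one is returned.
    assert get_value_or_default(fallback, DefaultPlaceholder(0)) is fallback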
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
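# --- Added illustration (not part of the original dataset field) -----------
# This revision of deep_dict_update() merges nested dicts in place,
# concatenates lists, and overwrites any other value. A small, purely
# illustrative self-check of that behaviour:
def _demo_deep_dict_update() -> None:
    main = {"a": {"x": 1}, "tags": ["one"], "b": 1}
    deep_dict_update(main, {"a": {"y": 2}, "tags": ["two"], "b": 2})
    assert main == {"a": {"x": 1, "y": 2}, "tags": ["one", "two"], "b": 2}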
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial004_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial004_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial004_py310.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial004_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.68s ===============================
| {'total': 4, 'passed': 4, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial004_py310.py |
1_fastapi_callee__utils__is_body_allowed_for_status_code__c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | callee | fail-to-pass | https://github.com/fastapi/fastapi | c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | function | is_body_allowed_for_status_code | utils.py | def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
| def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
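# --- Added illustration (not part of the original gold_code field) ---------
# The updated helper above treats the OpenAPI "patterned" keys ("default",
# "1XX" ... "5XX") as body-capable and additionally excludes 205 alongside
# 204 and 304. A minimal, hypothetical self-check of that behaviour:
def _demo_is_body_allowed_for_status_code() -> None:  # illustrative only
    assert is_body_allowed_for_status_code(None) is True
    assert is_body_allowed_for_status_code("default") is True
    assert is_body_allowed_for_status_code("3XX") is True
    assert is_body_allowed_for_status_code(200) is True
    assert is_body_allowed_for_status_code(101) is False  # informational
    assert is_body_allowed_for_status_code(204) is False  # No Content
    assert is_body_allowed_for_status_code(205) is False  # Reset Content
    assert is_body_allowed_for_status_code(304) is False  # Not Modified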
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo()
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial005_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial005_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial005_py310.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial005_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.61s ===============================
| {'total': 3, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial005_py310.py |
1_fastapi_callee__utils__get_path_param_names__b9d912c6380661647b51c322820fcb4ba08f32d2 | callee | fail-to-pass | https://github.com/fastapi/fastapi | b9d912c6380661647b51c322820fcb4ba08f32d2 | function | get_path_param_names | utils.py | def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
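# Editorial note (hedged): under pydantic v1 this clones the response model so
# returned data is re-validated against a fresh copy and undeclared extra
# fields do not leak into the response; under pydantic v2 the code path above
# simply returns the field unchanged, so no clone is made.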
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
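# Editorial sketch: for a hypothetical route named "read_item" mounted at
# "/items/{item_id}" and served over GET, the generated id would be
# "read_item_items__item_id__get" (every non-word character becomes "_").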
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
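# Editorial sketch: nested dicts merge recursively, lists are concatenated,
# and any other value is overwritten (the sample data is invented).
_example_openapi = {"info": {"title": "A"}, "tags": [{"name": "items"}], "version": 1}
deep_dict_update(
    _example_openapi,
    {"info": {"summary": "B"}, "tags": [{"name": "users"}], "version": 2},
)
assert _example_openapi == {
    "info": {"title": "A", "summary": "B"},
    "tags": [{"name": "items"}, {"name": "users"}],
    "version": 2,
}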
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
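# Editorial sketch (assumes DefaultPlaceholder is imported at the top of this
# module from fastapi.datastructures): the first non-placeholder argument wins,
# otherwise the first placeholder is returned.
assert get_value_or_default(DefaultPlaceholder(None), "override", "ignored") == "override"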
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial005_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial005_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial005_py310.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial005_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.64s ===============================
| {'total': 3, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial005_py310.py |
1_fastapi_callee__utils__create_cloned_field__aa84ac8e3e305d76e71c0c1b31237517948fa12f | callee | fail-to-pass | https://github.com/fastapi/fastapi | aa84ac8e3e305d76e71c0c1b31237517948fa12f | function | create_cloned_field | utils.py | def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| def create_cloned_field(
field: ModelField,
*,
    cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
            use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
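# Editorial note: taken together, these helpers walk every route's body and
# response fields, flatten the pydantic models they reference, and render each
# model into the schema dict stored under the name chosen in model_name_map,
# which is what later becomes the OpenAPI definitions/components section.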
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
        default=None,
        required=False,
        model_config=BaseConfig,
        schema=Schema(None),
    )
    new_field.has_alias = field.has_alias
    new_field.alias = field.alias
    new_field.class_validators = field.class_validators
    new_field.default = field.default
    new_field.required = field.required
    new_field.model_config = field.model_config
    new_field.schema = field.schema
    new_field.allow_none = field.allow_none
    new_field.validate_always = field.validate_always
    if field.sub_fields:
        new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial005_py310.py _
tests/test_tutorial/test_response_model/test_tutorial005_py310.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
fastapi/utils.py:109: in <module>
def create_cloned_field(field: Field) -> Field:
E NameError: name 'Field' is not defined
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial005_py310.py - Nam...
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.61s ===============================
| {'total': 3, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial005_py310.py |
1_fastapi_callee__utils__generate_operation_id_for_path__687065509b047a642acd1d281d6d68c6698509b1 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 687065509b047a642acd1d281d6d68c6698509b1 | function | generate_operation_id_for_path | utils.py | def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
    cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
            use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial005_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial005_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial005_py310.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial005_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.62s ===============================
| {'total': 3, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial005_py310.py |
1_fastapi_callee__utils__generate_unique_id__8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | function | generate_unique_id | utils.py | def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
| def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
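# Editorial note (assumption): this older create_response_field helper appears
# to serve the same purpose as the create_model_field helper shown earlier in
# this document, wrapping a type in a pydantic ModelField and converting
# pydantic's RuntimeError into a friendlier FastAPIError.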
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
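A quick sketch of the extraction (hypothetical paths; only the regex above is exercised):

from fastapi.utils import get_path_param_names

assert get_path_param_names("/users/{user_id}/items/{item_id}") == {"user_id", "item_id"}
assert get_path_param_names("/healthcheck") == set()  # no parameters -> empty set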
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
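A hedged usage sketch (assuming this version of fastapi.utils is installed; Item and NotPydanticCompatible are hypothetical types defined only for the example): valid annotations yield a ModelField, while types Pydantic cannot handle surface as FastAPIError rather than the underlying RuntimeError or PydanticSchemaGenerationError.

from fastapi.exceptions import FastAPIError
from fastapi.utils import create_model_field
from pydantic import BaseModel


class Item(BaseModel):
    name: str


class NotPydanticCompatible:  # arbitrary class with no Pydantic validators
    pass


field = create_model_field(name="response_item", type_=Item)  # works for valid types

try:
    create_model_field(name="bad", type_=NotPydanticCompatible)
except FastAPIError as exc:
    print("rejected:", exc)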
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial005_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial005_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial005_py310.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial005_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.64s ===============================
| {'total': 3, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial005_py310.py |
1_fastapi_callee__utils__deep_dict_update__181a32236a32305788b87d10e7990d9564ae2056 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 181a32236a32305788b87d10e7990d9564ae2056 | function | deep_dict_update | utils.py | def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
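A minimal sketch of the merge semantics implemented above (assuming this gold version is the one importable as fastapi.utils.deep_dict_update): nested dicts are merged recursively, lists are concatenated, and any other value is overwritten in place.

from fastapi.utils import deep_dict_update

main = {"info": {"title": "API", "version": "1"}, "tags": [{"name": "items"}]}
update = {"info": {"version": "2"}, "tags": [{"name": "users"}], "servers": ["/v2"]}

deep_dict_update(main, update)
assert main == {
    "info": {"title": "API", "version": "2"},        # nested dict merged key by key
    "tags": [{"name": "items"}, {"name": "users"}],  # lists concatenated
    "servers": ["/v2"],                               # new key copied over
}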
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
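A small usage sketch (assuming a Pydantic v1 environment, where model_process_schema is available, and a hypothetical Item model defined only for the example):

from pydantic import BaseModel
from fastapi.utils import get_model_definitions


class Item(BaseModel):
    name: str


definitions = get_model_definitions(flat_models={Item}, model_name_map={Item: "Item"})
print(definitions["Item"]["properties"])  # {'name': {'title': 'Name', 'type': 'string'}}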
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_response_model/test_tutorial005_py310.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_response_model/test_tutorial005_py310.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_response_model/test_tutorial005_py310.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_response_model/test_tutorial005_py310.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.62s ===============================
| {'total': 3, 'passed': 3, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_response_model/test_tutorial005_py310.py |
1_fastapi_callee__utils__is_body_allowed_for_status_code__c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | callee | fail-to-pass | https://github.com/fastapi/fastapi | c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | function | is_body_allowed_for_status_code | utils.py | def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
| def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
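A short sketch of the resulting behaviour (assuming this gold version is installed as fastapi.utils): informational codes and the bodyless 204/205/304 reject a body, while regular codes, the OpenAPI range patterns, and "default" allow one.

from fastapi.utils import is_body_allowed_for_status_code

assert not is_body_allowed_for_status_code(101)     # informational -> no body
assert not is_body_allowed_for_status_code("204")   # numeric strings are coerced
assert not is_body_allowed_for_status_code(304)
assert is_body_allowed_for_status_code(200)
assert is_body_allowed_for_status_code("4XX")       # OpenAPI range pattern
assert is_body_allowed_for_status_code("default")
assert is_body_allowed_for_status_code(None)        # no status code -> allowed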
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo()
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
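# A small sketch of the ID scheme above, using a stand-in object instead of a
# real APIRoute (constructing one needs an endpoint and is beside the point):
# the route name and path are concatenated, every non-word character becomes
# "_", and the lowercased HTTP method is appended. The deprecated
# generate_operation_id_for_path above applies the same substitution.
from types import SimpleNamespace

fake_route = SimpleNamespace(
    name="read_item", path_format="/items/{item_id}", methods={"GET"}
)
assert generate_unique_id(fake_route) == "read_item_items__item_id__get"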
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
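# A quick sketch of the merge semantics above: in place, recursive for nested
# dicts, concatenation for lists, plain overwrite for everything else.
openapi_extra = {"responses": {"404": {"description": "Not found"}}, "tags": ["items"]}
overrides = {"responses": {"404": {"content": {"application/json": {}}}}, "tags": ["admin"]}
deep_dict_update(openapi_extra, overrides)
assert openapi_extra["responses"]["404"] == {
    "description": "Not found",
    "content": {"application/json": {}},
}
assert openapi_extra["tags"] == ["items", "admin"]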
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
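# Spot checks of the rule above: 1xx, 204, 205 and 304 responses must not
# carry a body, while the OpenAPI range patterns and "default" stay allowed
# because they can cover body-bearing codes.
assert is_body_allowed_for_status_code(200) is True
assert is_body_allowed_for_status_code("2XX") is True
assert is_body_allowed_for_status_code("default") is True
assert is_body_allowed_for_status_code(None) is True  # no explicit status code
assert is_body_allowed_for_status_code(204) is False
assert is_body_allowed_for_status_code(304) is False
assert is_body_allowed_for_status_code(100) is False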
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
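# Hedged sketch only: create_model_field is an internal compatibility helper,
# so the concrete ModelField it returns differs between pydantic v1 and v2
# (see the PYDANTIC_V2 branches above), but a plain call looks the same in
# both cases.
price_field = create_model_field(name="price", type_=float)
optional_q = create_model_field(name="q", type_=str, default=None)
# Annotations pydantic cannot model (the error text above cites
# Union[Response, dict, None] as an example) end up in the FastAPIError branch.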
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
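# Hedged sketch of the cloning path: it only does real work under pydantic v1,
# since with pydantic v2 the function above returns the field unchanged.
# Cloning wraps the original model in a fresh subclass so response filtering
# never mutates the user's class, and the WeakKeyDictionary cache above lets
# repeated clones of the same model reuse that subclass.
from pydantic import BaseModel

class Item(BaseModel):
    name: str
    price: float

item_field = create_model_field(name="item", type_=Item)
cloned = create_cloned_field(item_field)
cloned_again = create_cloned_field(create_model_field(name="item", type_=Item))
# Under pydantic v1 both clones share one cached subclass of Item; under
# pydantic v2 they are simply the original fields, untouched.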
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_async_sql_databases/test_tutorial001.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_async_sql_databases/test_tutorial001.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_async_sql_databases/test_tutorial001.py:2: in <module>
from fastapi import FastAPI
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_async_sql_databases/test_tutorial001.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.61s ===============================
| {'total': 2, 'passed': 1, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_async_sql_databases/test_tutorial001.py |
1_fastapi_callee__utils__get_path_param_names__b9d912c6380661647b51c322820fcb4ba08f32d2 | callee | fail-to-pass | https://github.com/fastapi/fastapi | b9d912c6380661647b51c322820fcb4ba08f32d2 | function | get_path_param_names | utils.py | def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
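# Quick behavioural check of the helper above: the non-greedy capture keeps
# only the text between each pair of braces, including any converter suffix.
assert get_path_param_names("/users/{user_id}/items/{item_id}") == {"user_id", "item_id"}
assert get_path_param_names("/files/{file_path:path}") == {"file_path:path"}
assert get_path_param_names("/health") == set()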
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_async_sql_databases/test_tutorial001.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_async_sql_databases/test_tutorial001.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_async_sql_databases/test_tutorial001.py:2: in <module>
from fastapi import FastAPI
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_async_sql_databases/test_tutorial001.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.62s ===============================
| {'total': 2, 'passed': 1, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_async_sql_databases/test_tutorial001.py |
1_fastapi_callee__utils__create_cloned_field__aa84ac8e3e305d76e71c0c1b31237517948fa12f | callee | fail-to-pass | https://github.com/fastapi/fastapi | aa84ac8e3e305d76e71c0c1b31237517948fa12f | function | create_cloned_field | utils.py | def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
default = None,
required = False,
model_config = BaseConfig,
schema = Schema(None),
)
    new_field.has_alias = field.has_alias
    new_field.alias = field.alias
    new_field.class_validators = field.class_validators
    new_field.default = field.default
    new_field.required = field.required
    new_field.model_config = field.model_config
    new_field.schema = field.schema
    new_field.allow_none = field.allow_none
    new_field.validate_always = field.validate_always
    if field.sub_fields:
        new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_async_sql_databases/test_tutorial001.py _
tests/test_tutorial/test_async_sql_databases/test_tutorial001.py:2: in <module>
from fastapi import FastAPI
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
fastapi/utils.py:109: in <module>
def create_cloned_field(field: Field) -> Field:
E NameError: name 'Field' is not defined
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_async_sql_databases/test_tutorial001.py - Name...
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.67s ===============================
| {'total': 2, 'passed': 1, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_async_sql_databases/test_tutorial001.py |
1_fastapi_callee__utils__generate_operation_id_for_path__687065509b047a642acd1d281d6d68c6698509b1 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 687065509b047a642acd1d281d6d68c6698509b1 | function | generate_operation_id_for_path | utils.py | def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
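# Editor's illustration (hedged, not part of the captured source): informational codes
# such as 100 and the body-less 204/205/304 are rejected, while OpenAPI range patterns
# like "2XX" or "default" pass through unchanged.
assert is_body_allowed_for_status_code(200) is True
assert is_body_allowed_for_status_code("2XX") is True
assert is_body_allowed_for_status_code(100) is False
assert is_body_allowed_for_status_code(204) is False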
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
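# Editor's illustration (hedged): every "{...}" segment of the path template is captured,
# and the result is a set of unique parameter names.
assert get_path_param_names("/users/{user_id}/items/{item_id}") == {"user_id", "item_id"}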
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
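# Editor's sketch (hedged): any annotation Pydantic can resolve yields a ModelField on
# either major version; an unresolvable one would instead surface as the FastAPIError
# raised above. The "limit" name is purely illustrative.
limit_field = create_model_field(name="limit", type_=int)
assert limit_field.name == "limit"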
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
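# Editor's sketch (hedged): on the Pydantic v2 path the clone above is a no-op and the
# same field object is returned; on v1 a recursively cloned copy with the same name is
# built instead, so the assertion holds either way.
item_field = create_model_field(name="item", type_=str)
assert create_cloned_field(item_field).name == "item"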
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
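# Editor's illustration (hedged), assuming DefaultPlaceholder simply wraps a default
# value: the first concrete (non-placeholder) argument wins, otherwise the leading
# placeholder itself is returned.
placeholder = DefaultPlaceholder("fallback")
assert get_value_or_default(placeholder, "explicit") == "explicit"
assert get_value_or_default(placeholder, DefaultPlaceholder("other")) is placeholder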
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_async_sql_databases/test_tutorial001.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_async_sql_databases/test_tutorial001.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_async_sql_databases/test_tutorial001.py:2: in <module>
from fastapi import FastAPI
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_async_sql_databases/test_tutorial001.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.61s ===============================
| {'total': 2, 'passed': 1, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_async_sql_databases/test_tutorial001.py |
1_fastapi_callee__utils__generate_unique_id__8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | function | generate_unique_id | utils.py | def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
| def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
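# Editor's sketch (hedged): a minimal stand-in object is enough to show the
# normalization, since only name, path_format and methods are read from the route.
class _FakeRoute:
    name = "read_item"
    path_format = "/items/{item_id}"
    methods = {"GET"}

assert generate_unique_id(_FakeRoute()) == "read_item_items__item_id__get"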
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_async_sql_databases/test_tutorial001.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_async_sql_databases/test_tutorial001.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_async_sql_databases/test_tutorial001.py:2: in <module>
from fastapi import FastAPI
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_async_sql_databases/test_tutorial001.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.61s ===============================
| {'total': 2, 'passed': 1, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_async_sql_databases/test_tutorial001.py |
1_fastapi_callee__utils__deep_dict_update__181a32236a32305788b87d10e7990d9564ae2056 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 181a32236a32305788b87d10e7990d9564ae2056 | function | deep_dict_update | utils.py | def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
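# Editor's illustration (hedged): nested dicts are merged recursively, matching lists
# are concatenated, and any other existing value is simply overwritten.
config = {"a": {"x": 1}, "tags": ["one"], "b": 1}
deep_dict_update(config, {"a": {"y": 2}, "tags": ["two"], "b": 2})
assert config == {"a": {"x": 1, "y": 2}, "tags": ["one", "two"], "b": 2}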
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
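# Illustration only (hypothetical route, not part of the original module): the
# error message above recommends response_model=None when a return annotation
# is not a valid Pydantic field. A sketch of that usage in application code:
#
#     from typing import Union
#     from fastapi import FastAPI
#     from fastapi.responses import RedirectResponse
#
#     app = FastAPI()
#
#     @app.get("/portal", response_model=None)
#     def get_portal(teleport: bool = False) -> Union[RedirectResponse, dict]:
#         if teleport:
#             return RedirectResponse(url="https://example.com")
#         return {"message": "No teleport today."}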
def create_cloned_field(
field: ModelField,
*,
    cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
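# A minimal sketch (illustration only) of the identifier produced above for a
# GET route named "read_item" mounted at "/items/{item_id}". The stand-in
# object below is hypothetical, not a real APIRoute.
def _demo_generate_unique_id() -> None:
    from types import SimpleNamespace

    fake_route = SimpleNamespace(
        name="read_item", path_format="/items/{item_id}", methods={"GET"}
    )
    unique_id = generate_unique_id(fake_route)  # type: ignore[arg-type]
    assert unique_id == "read_item_items__item_id__get"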
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
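# A minimal usage sketch (illustration only) of the resolution order described
# in the docstring above, assuming DefaultPlaceholder wraps a single default
# value as in fastapi.datastructures: the first non-placeholder argument wins;
# if every argument is a placeholder, the first placeholder is returned.
def _demo_get_value_or_default() -> None:
    router_default = DefaultPlaceholder("application/json")
    app_override = "text/html"
    assert get_value_or_default(router_default, app_override) == "text/html"
    result = get_value_or_default(router_default, DefaultPlaceholder("x"))
    assert result is router_default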
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
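# A minimal usage sketch (illustration only): deep_dict_update merges nested
# dicts recursively, concatenates lists, and overwrites any other value in place.
def _demo_deep_dict_update() -> None:
    main = {"info": {"title": "API"}, "tags": ["items"], "version": 1}
    update = {"info": {"description": "demo"}, "tags": ["users"], "version": 2}
    deep_dict_update(main, update)
    assert main == {
        "info": {"title": "API", "description": "demo"},
        "tags": ["items", "users"],
        "version": 2,
    }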
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_async_sql_databases/test_tutorial001.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_async_sql_databases/test_tutorial001.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_async_sql_databases/test_tutorial001.py:2: in <module>
from fastapi import FastAPI
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_async_sql_databases/test_tutorial001.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.61s ===============================
| {'total': 2, 'passed': 1, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 1, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_async_sql_databases/test_tutorial001.py |
1_fastapi_callee__utils__is_body_allowed_for_status_code__c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | callee | fail-to-pass | https://github.com/fastapi/fastapi | c43120258fa89bc20d6f8ee671b6ead9ab223fc7 | function | is_body_allowed_for_status_code | utils.py | def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
| def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo()
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def create_cloned_field(
field: ModelField,
*,
    cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_bigger_applications/test_main_an_py39.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_bigger_applications/test_main_an_py39.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_bigger_applications/test_main_an_py39.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_bigger_applications/test_main_an_py39.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.64s ===============================
| {'total': 25, 'passed': 25, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_bigger_applications/test_main_an_py39.py |
1_fastapi_callee__utils__get_path_param_names__b9d912c6380661647b51c322820fcb4ba08f32d2 | callee | fail-to-pass | https://github.com/fastapi/fastapi | b9d912c6380661647b51c322820fcb4ba08f32d2 | function | get_path_param_names | utils.py | def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
| import re
from typing import Dict, Sequence, Set, Type
from starlette.routing import BaseRoute
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseModel
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
def get_flat_models_from_routes(routes: Sequence[BaseRoute]):
body_fields_from_routes = []
responses_from_routes = []
for route in routes:
if route.include_in_schema and isinstance(route, routing.APIRoute):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
):
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str):
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def create_cloned_field(
field: ModelField,
*,
    cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_bigger_applications/test_main_an_py39.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_bigger_applications/test_main_an_py39.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_bigger_applications/test_main_an_py39.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_bigger_applications/test_main_an_py39.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.66s ===============================
| {'total': 25, 'passed': 25, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_bigger_applications/test_main_an_py39.py |
1_fastapi_callee__utils__create_cloned_field__aa84ac8e3e305d76e71c0c1b31237517948fa12f | callee | fail-to-pass | https://github.com/fastapi/fastapi | aa84ac8e3e305d76e71c0c1b31237517948fa12f | function | create_cloned_field | utils.py | def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| def create_cloned_field(
field: ModelField,
*,
    cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
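# A generic sketch (illustration only, all names below are hypothetical) of the
# memoisation pattern used via _CLONED_TYPES_CACHE above: a WeakKeyDictionary
# keyed by the original type lets recursive models be cloned once and reused,
# without keeping the types alive artificially.
def _demo_clone_cache_pattern() -> None:
    from weakref import WeakKeyDictionary

    cache: "WeakKeyDictionary[type, type]" = WeakKeyDictionary()

    def clone_type(tp: type) -> type:
        cached = cache.get(tp)
        if cached is None:
            cached = type(tp.__name__, (tp,), {})  # new subclass, same name
            cache[tp] = cached
        return cached

    class Node:
        pass

    assert clone_type(Node) is clone_type(Node)  # second call reuses the cache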
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
        default=None,
        required=False,
        model_config=BaseConfig,
        schema=Schema(None),
    )
    new_field.has_alias = field.has_alias
    new_field.alias = field.alias
    new_field.class_validators = field.class_validators
    new_field.default = field.default
    new_field.required = field.required
    new_field.model_config = field.model_config
    new_field.schema = field.schema
    new_field.allow_none = field.allow_none
    new_field.validate_always = field.validate_always
    if field.sub_fields:
        new_field.sub_fields = [
            create_cloned_field(sub_field) for sub_field in field.sub_fields
        ]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(
routes: Sequence[Type[BaseRoute]]
) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
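# Why a WeakKeyDictionary for the cache above: a cloned type is dropped
# automatically once the original model class is garbage collected. A minimal
# sketch with stand-in classes (names are illustrative, not part of FastAPI):
import gc
class _OriginalModel:  # stand-in for a user's model class
    pass
class _ClonedModel:  # stand-in for the clone that would be cached
    pass
_cache: MutableMapping[type, type] = WeakKeyDictionary()
_cache[_OriginalModel] = _ClonedModel
print(len(_cache))  # 1 while _OriginalModel is still referenced
del _OriginalModel
gc.collect()  # weak keys are purged once the class is collected
print(len(_cache))  # expected to be 0 after collection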
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
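# Illustrative checks, not part of the original utils.py: the helper above keeps
# bodies out of informational responses and out of 204, 205, and 304, while the
# OpenAPI range patterns ("default", "1XX", ..., "5XX") are always allowed.
assert is_body_allowed_for_status_code(None) is True
assert is_body_allowed_for_status_code(200) is True
assert is_body_allowed_for_status_code("default") is True
assert is_body_allowed_for_status_code("2XX") is True
assert is_body_allowed_for_status_code(101) is False
assert is_body_allowed_for_status_code(204) is False
assert is_body_allowed_for_status_code(205) is False
assert is_body_allowed_for_status_code(304) is False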
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
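# Usage sketch (illustrative paths only): parameter names are extracted from the
# curly-brace placeholders of a route path.
assert get_path_param_names("/items/{item_id}/tags/{tag}") == {"item_id", "tag"}
assert get_path_param_names("/health") == set()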
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
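# Illustrative call, not from the original module: build a ModelField for a
# hypothetical response model. Under Pydantic v2 only `name`, `field_info`, and
# `mode` reach ModelField; under v1 the extra keyword arguments are used instead.
class _SketchItem(BaseModel):  # defined only for this sketch
    name: str
_sketch_field = create_model_field(name="item", type_=_SketchItem)
print(type(_sketch_field).__name__)  # ModelField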
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
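# Hedged sketch (not in the original file): generate_unique_id only reads
# `name`, `path_format`, and `methods`, so a simple stand-in object is enough to
# show how every non-word character collapses into an underscore.
from types import SimpleNamespace
_fake_route = SimpleNamespace(
    name="read_item", path_format="/items/{item_id}", methods={"GET"}
)
print(generate_unique_id(_fake_route))  # read_item_items__item_id__get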
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
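# Illustrative merge, not part of the module: nested dicts are merged key by key,
# two lists under the same key are concatenated, and anything else is overwritten.
_schema = {"info": {"title": "API"}, "tags": [{"name": "items"}]}
deep_dict_update(_schema, {"info": {"version": "1.0"}, "tags": [{"name": "users"}]})
assert _schema == {
    "info": {"title": "API", "version": "1.0"},
    "tags": [{"name": "items"}, {"name": "users"}],
}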
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
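# Usage sketch with made-up values: the first argument that is *not* a
# DefaultPlaceholder wins, otherwise the first placeholder itself is returned.
_placeholder = DefaultPlaceholder("application/json")
assert get_value_or_default(_placeholder, "text/html") == "text/html"
assert get_value_or_default(_placeholder, DefaultPlaceholder("text/plain")) is _placeholder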
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_bigger_applications/test_main_an_py39.py _
tests/test_tutorial/test_bigger_applications/test_main_an_py39.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
fastapi/utils.py:109: in <module>
def create_cloned_field(field: Field) -> Field:
E NameError: name 'Field' is not defined
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_bigger_applications/test_main_an_py39.py - Nam...
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.64s ===============================
| {'total': 25, 'passed': 25, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_bigger_applications/test_main_an_py39.py |
1_fastapi_callee__utils__generate_operation_id_for_path__687065509b047a642acd1d281d6d68c6698509b1 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 687065509b047a642acd1d281d6d68c6698509b1 | function | generate_operation_id_for_path | utils.py | def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
from typing import Any, Dict, List, Sequence, Set, Type, cast
from fastapi import routing
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, Schema, create_model
from pydantic.fields import Field
from pydantic.schema import get_flat_models_from_fields, model_process_schema
from pydantic.utils import lenient_issubclass
from starlette.routing import BaseRoute
def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:
body_fields_from_routes: List[Field] = []
responses_from_routes: List[Field] = []
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, Field
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
def get_model_definitions(
*, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
m_schema, m_definitions = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
def create_cloned_field(field: Field) -> Field:
original_type = field.type_
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = create_model( # type: ignore
original_type.__name__,
__config__=original_type.__config__,
__validators__=original_type.__validators__,
)
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = f
new_field = Field(
name=field.name,
type_=use_type,
class_validators={},
default=None,
required=False,
model_config=BaseConfig,
schema=Schema(None),
)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.schema = field.schema
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field) for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(field.key_field)
new_field.validators = field.validators
new_field.whole_pre_validators = field.whole_pre_validators
new_field.whole_post_validators = field.whole_post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field._populate_validators()
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = operation_id.replace(
"{", "_").replace("}", "_").replace("/", "_")
operation_id = operation_id + "_" + method.lower()
return operation_id
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_bigger_applications/test_main_an_py39.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_bigger_applications/test_main_an_py39.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_bigger_applications/test_main_an_py39.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_bigger_applications/test_main_an_py39.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.64s ===============================
| {'total': 25, 'passed': 25, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_bigger_applications/test_main_an_py39.py |
1_fastapi_callee__utils__generate_unique_id__8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 8a0d4c79c18e6bd219b965b9d50578c139e7efd8 | function | generate_unique_id | utils.py | def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
| def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
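# Usage sketch added for illustration; it is not part of the original
# fastapi.utils module. The first argument that is not a DefaultPlaceholder is
# returned as-is; when every argument is a placeholder, the first placeholder
# itself comes back for the caller to unwrap.
def _example_get_value_or_default() -> None:
    placeholder = DefaultPlaceholder(dict)
    assert get_value_or_default(list, placeholder) is list
    assert get_value_or_default(placeholder, list) is list
    assert get_value_or_default(placeholder) is placeholder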
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
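# Usage sketch added for illustration; not part of the original module. Codes
# below 200 and the explicitly bodiless 204/205/304 reject a response body,
# while None, OpenAPI range patterns such as "2XX", and ordinary codes allow it.
def _example_is_body_allowed_for_status_code() -> None:
    assert is_body_allowed_for_status_code(None)
    assert is_body_allowed_for_status_code("default")
    assert is_body_allowed_for_status_code("2XX")
    assert is_body_allowed_for_status_code(200)
    assert not is_body_allowed_for_status_code(101)
    assert not is_body_allowed_for_status_code(204)
    assert not is_body_allowed_for_status_code("304")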
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
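# Illustrative sketch (not part of the original module): path parameters are
# extracted from the route template with the non-greedy "{(.*?)}" pattern.
def _example_get_path_param_names() -> None:
    assert get_path_param_names("/items/{item_id}/users/{user_id}") == {
        "item_id",
        "user_id",
    }
    assert get_path_param_names("/health") == set()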
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
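# Hedged usage sketch (not part of the original module): build a field for a
# plain Python type. The resulting ModelField differs internally between
# Pydantic v1 and v2, so only the field name is asserted here.
def _example_create_model_field() -> None:
    price_field = create_model_field(name="price", type_=float)
    assert price_field.name == "price"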
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
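# Illustration added for clarity (not part of the original module): the same
# r"\W" substitution used by generate_unique_id turns a route named
# "read_item" with path template "/items/{item_id}" and a GET method into the
# operation id "read_item_items__item_id__get".
def _example_unique_id_sanitization() -> None:
    raw = "read_item" + "/items/{item_id}"
    assert re.sub(r"\W", "_", raw) + "_" + "get" == "read_item_items__item_id__get"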
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_bigger_applications/test_main_an_py39.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_bigger_applications/test_main_an_py39.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_bigger_applications/test_main_an_py39.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_bigger_applications/test_main_an_py39.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.66s ===============================
| {'total': 25, 'passed': 25, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_bigger_applications/test_main_an_py39.py |
1_fastapi_callee__utils__deep_dict_update__181a32236a32305788b87d10e7990d9564ae2056 | callee | fail-to-pass | https://github.com/fastapi/fastapi | 181a32236a32305788b87d10e7990d9564ae2056 | function | deep_dict_update | utils.py | def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
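# Usage sketch added for illustration; not part of the original function's
# module. Nested dicts are merged key by key, lists are concatenated, and any
# other value is simply overwritten.
def _example_deep_dict_update() -> None:
    main = {"info": {"title": "API"}, "tags": ["items"], "version": 1}
    update = {"info": {"description": "demo"}, "tags": ["users"], "version": 2}
    deep_dict_update(main, update)
    assert main == {
        "info": {"title": "API", "description": "demo"},
        "tags": ["items", "users"],
        "version": 2,
    }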
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
| import functools
import re
from dataclasses import is_dataclass
from enum import Enum
from typing import Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.logger import logger
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
try:
from pydantic.fields import FieldInfo, ModelField, UndefinedType
PYDANTIC_1 = True
except ImportError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
from pydantic.fields import Field as ModelField # type: ignore
from pydantic import Schema as FieldInfo # type: ignore
class UndefinedType: # type: ignore
def __repr__(self) -> str:
return "PydanticUndefined"
logger.warning(
"Pydantic versions < 1.0.0 are deprecated in FastAPI and support will be "
"removed soon."
)
PYDANTIC_1 = False
# TODO: remove when removing support for Pydantic < 1.0.0
def get_field_info(field: ModelField) -> FieldInfo:
if PYDANTIC_1:
return field.field_info # type: ignore
else:
return field.schema # type: ignore # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
def warning_response_model_skip_defaults_deprecated() -> None:
logger.warning( # pragma: nocover
"response_model_skip_defaults has been deprecated in favor of "
"response_model_exclude_unset to keep in line with Pydantic v1, support for "
"it will be removed soon."
)
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict] = {}
for model in flat_models:
# ignore mypy error until enum schemas are released
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX # type: ignore
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = False,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo(None)
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
if PYDANTIC_1:
return response_field(field_info=field_info)
else: # pragma: nocover
return response_field(schema=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
)
def create_cloned_field(
field: ModelField, *, cloned_types: Dict[Type[BaseModel], Type[BaseModel]] = None,
) -> ModelField:
# _cloned_types has already cloned types, to support recursive models
if cloned_types is None:
cloned_types = dict()
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__ # type: ignore
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
if PYDANTIC_1:
new_field.field_info = field.field_info
else: # pragma: nocover
new_field.schema = field.schema # type: ignore
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
if PYDANTIC_1:
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
else: # pragma: nocover
new_field.whole_pre_validators = field.whole_pre_validators # type: ignore
new_field.whole_post_validators = field.whole_post_validators # type: ignore
new_field.parse_json = field.parse_json
new_field.shape = field.shape
try:
new_field.populate_validators()
except AttributeError: # pragma: nocover
# TODO: remove when removing support for Pydantic < 1.0.0
new_field._populate_validators() # type: ignore
return new_field
def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:
operation_id = name + path
operation_id = re.sub("[^0-9a-zA-Z_]", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def deep_dict_update(main_dict: dict, update_dict: dict) -> None:
for key in update_dict:
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(update_dict[key], dict)
):
deep_dict_update(main_dict[key], update_dict[key])
else:
main_dict[key] = update_dict[key]
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel],
Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(
original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| import re
import warnings
from dataclasses import is_dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
MutableMapping,
Optional,
Set,
Type,
Union,
cast,
)
from weakref import WeakKeyDictionary
import fastapi
from fastapi._compat import (
PYDANTIC_V2,
BaseConfig,
ModelField,
PydanticSchemaGenerationError,
Undefined,
UndefinedType,
Validator,
lenient_issubclass,
)
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
# Cache for `create_cloned_field`
_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], Type[BaseModel]] = (
WeakKeyDictionary()
)
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 205, 304})
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_model_field(
name: str,
type_: Any,
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = Undefined,
required: Union[bool, UndefinedType] = Undefined,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
mode: Literal["validation", "serialization"] = "validation",
) -> ModelField:
class_validators = class_validators or {}
if PYDANTIC_V2:
field_info = field_info or FieldInfo(
annotation=type_, default=default, alias=alias
)
else:
field_info = field_info or FieldInfo()
kwargs = {"name": name, "field_info": field_info}
if PYDANTIC_V2:
kwargs.update({"mode": mode})
else:
kwargs.update(
{
"type_": type_,
"class_validators": class_validators,
"default": default,
"required": required,
"model_config": model_config,
"alias": alias,
}
)
try:
return ModelField(**kwargs) # type: ignore[arg-type]
except (RuntimeError, PydanticSchemaGenerationError):
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
if PYDANTIC_V2:
return field
# cloned_types caches already cloned types to support recursive models and improve
# performance by avoiding unnecessary cloning
if cloned_types is None:
cloned_types = _CLONED_TYPES_CACHE
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_model_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias # type: ignore[attr-defined]
new_field.alias = field.alias # type: ignore[misc]
new_field.class_validators = field.class_validators # type: ignore[attr-defined]
new_field.default = field.default # type: ignore[misc]
new_field.required = field.required # type: ignore[misc]
new_field.model_config = field.model_config # type: ignore[attr-defined]
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none # type: ignore[attr-defined]
new_field.validate_always = field.validate_always # type: ignore[attr-defined]
if field.sub_fields: # type: ignore[attr-defined]
new_field.sub_fields = [ # type: ignore[attr-defined]
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields # type: ignore[attr-defined]
]
if field.key_field: # type: ignore[attr-defined]
new_field.key_field = create_cloned_field( # type: ignore[attr-defined]
field.key_field, # type: ignore[attr-defined]
cloned_types=cloned_types,
)
new_field.validators = field.validators # type: ignore[attr-defined]
new_field.pre_validators = field.pre_validators # type: ignore[attr-defined]
new_field.post_validators = field.post_validators # type: ignore[attr-defined]
new_field.parse_json = field.parse_json # type: ignore[attr-defined]
new_field.shape = field.shape # type: ignore[attr-defined]
new_field.populate_validators() # type: ignore[attr-defined]
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = f"{name}{path}"
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = f"{operation_id}_{method.lower()}"
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = f"{route.name}{route.path_format}"
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = f"{operation_id}_{list(route.methods)[0].lower()}"
return operation_id
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
| ============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.3.2, pluggy-1.5.0 -- /workspace/test_venv/bin/python
cachedir: .pytest_cache
rootdir: /workspace/test_repo
configfile: pyproject.toml
plugins: anyio-3.7.1, asyncio-0.24.0
asyncio: mode=Mode.STRICT, default_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
_ ERROR collecting tests/test_tutorial/test_bigger_applications/test_main_an_py39.py _
ImportError while importing test module '/workspace/test_repo/tests/test_tutorial/test_bigger_applications/test_main_an_py39.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/local/lib/python3.11/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests/test_tutorial/test_bigger_applications/test_main_an_py39.py:3: in <module>
from fastapi.testclient import TestClient
fastapi/__init__.py:7: in <module>
from .applications import FastAPI as FastAPI
fastapi/applications.py:16: in <module>
from fastapi import routing
fastapi/routing.py:35: in <module>
from fastapi.dependencies.utils import (
fastapi/dependencies/utils.py:57: in <module>
from fastapi.utils import create_response_field, get_path_param_names
E ImportError: cannot import name 'create_response_field' from 'fastapi.utils' (/workspace/test_repo/fastapi/utils.py)
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_bigger_applications/test_main_an_py39.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.65s ===============================
| {'total': 25, 'passed': 25, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | {'total': 1, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 1} | ./test_repo/fastapi/utils.py | ./test_repo/tests/test_tutorial/test_bigger_applications/test_main_an_py39.py |
2_transformers_callee__testing_utils__parse_flag_from_env__13deb95a405bbd1037ad233c692d7fd1de9d31e3 | callee | fail-to-pass | https://github.com/huggingface/transformers | 13deb95a405bbd1037ad233c692d7fd1de9d31e3 | function | parse_flag_from_env | testing_utils.py | def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError("If set, {} must be yes or no.".format(key))
return _value
| def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
| import os
import unittest
from distutils.util import strtobool
from transformers.file_utils import _tf_available, _torch_available
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError("If set, {} must be a int.".format(key))
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable
to a truthy value to run them.
"""
if not _run_slow_tests:
test_case = unittest.skip("test is slow")(test_case)
return test_case
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped
by default. Set the RUN_CUSTOM_TOKENIZERS environment variable
to a truthy value to run them.
"""
if not _run_custom_tokenizers:
test_case = unittest.skip("test of custom tokenizers")(test_case)
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not _torch_available:
test_case = unittest.skip("test requires PyTorch")(test_case)
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow.
These tests are skipped when TensorFlow isn't installed.
"""
if not _tf_available:
test_case = unittest.skip("test requires TensorFlow")(test_case)
return test_case
def require_multigpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch).
These tests are skipped on a machine without multiple GPUs.
To run *only* the multigpu tests, assuming all test names contain multigpu:
$ pytest -sv ./tests -k "multigpu"
"""
if not _torch_available:
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
return test_case
if _torch_available:
# Set the USE_CUDA environment variable to select a GPU.
torch_device = "cuda" if parse_flag_from_env("USE_CUDA") else "cpu"
else:
torch_device = None
| import os
import unittest
from distutils.util import strtobool
from transformers.file_utils import _tf_available, _torch_available
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError("If set, {} must be yes or no.".format(key))
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError("If set, {} must be a int.".format(key))
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable
to a truthy value to run them.
"""
if not _run_slow_tests:
test_case = unittest.skip("test is slow")(test_case)
return test_case
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped
by default. Set the RUN_CUSTOM_TOKENIZERS environment variable
to a truthy value to run them.
"""
if not _run_custom_tokenizers:
test_case = unittest.skip("test of custom tokenizers")(test_case)
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not _torch_available:
test_case = unittest.skip("test requires PyTorch")(test_case)
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow.
These tests are skipped when TensorFlow isn't installed.
"""
if not _tf_available:
test_case = unittest.skip("test requires TensorFlow")(test_case)
return test_case
def require_multigpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch).
These tests are skipped on a machine without multiple GPUs.
To run *only* the multigpu tests, assuming all test names contain multigpu:
$ pytest -sv ./tests -k "multigpu"
"""
if not _torch_available:
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
return test_case
if _torch_available:
# Set the USE_CUDA environment variable to select a GPU.
torch_device = "cuda" if parse_flag_from_env("USE_CUDA") else "cpu"
else:
torch_device = None
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and Flax
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
    Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
    Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
    Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
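# Illustrative usage sketch (not part of the original module): combining `require_read_token`
# with a config load from a gated repository. The checkpoint id is only an example of a gated
# repo and the test body is a placeholder.
class _ExampleGatedCheckpointUsage(unittest.TestCase):
    @require_read_token
    def test_loads_gated_config(self):
        from transformers import AutoConfig

        config = AutoConfig.from_pretrained("meta-llama/Llama-2-7b-hf")
        self.assertIsNotNone(config)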
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
    These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
    Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
    Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
    Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when they aren't
installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
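# Illustrative usage sketch (not part of the original module): stacking the skip decorators
# defined above on a plain unittest test; the tensor arithmetic is a made-up placeholder.
class _ExampleMultiGpuUsage(unittest.TestCase):
    @require_torch_multi_gpu
    @require_accelerate
    def test_runs_only_with_multiple_gpus_and_accelerate(self):
        import torch

        x = torch.ones(2, device="cuda:0") + torch.ones(2, device="cuda:1").to("cuda:0")
        self.assertEqual(x.sum().item(), 4.0)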
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
    return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(test_case)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
    must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(
torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(
torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
    Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
    return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
    Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
    joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
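# Illustrative sketch: resolving a fixture path relative to the test suite. This helper is
# meant to be called from a module living under `tests/`; the file name below is a
# hypothetical placeholder.
def _example_fixture_path():
    return os.path.join(get_tests_dir("fixtures"), "sample_text.txt")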
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
    doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
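# Illustrative sketch: pairing the capture helpers with `assert_screenout` to check
# progress-bar style output that uses carriage returns (the printed text is arbitrary).
def _example_capture_usage():
    with CaptureStdout() as cs:
        print("step 1\rstep 2\rdone")
    assert_screenout(cs.out, "done")
    with CaptureStderr(replay=False) as ce:
        print("warning: something minor", file=sys.stderr)
    assert "minor" in ce.err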
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
    >>> assert cl.out == msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
    class solves this problem by sorting out all the basic paths and providing easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
    allowed if an explicit `tmp_dir` is used, so that no `/tmp` or similar important part of the filesystem gets
    nuked by mistake, i.e. please always pass paths that start with `./`.
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
                If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
                `tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
                If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its
                contents intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
    this is a convenience wrapper that allows this::

        @mockenv(RUN_SLOW=True, USE_TF=False)
        def test_something():
            run_slow = os.getenv("RUN_SLOW", False)
            use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
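# Illustrative sketch: temporarily forcing offline mode and removing a proxy variable for a
# block of code; `os.environ` is restored when the context exits.
def _example_mockenv_context_usage():
    with mockenv_context("HTTP_PROXY", HF_HUB_OFFLINE="1"):
        assert os.environ["HF_HUB_OFFLINE"] == "1"
        assert "HTTP_PROXY" not in os.environ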
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
    NB: this function taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
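# Illustrative sketch of the `conftest.py` wiring the two helpers above assume (kept as a
# comment because these pytest hooks belong in conftest.py, not in this module; the import
# path is an assumption):
#
#     from transformers.testing_utils import pytest_addoption_shared, pytest_terminal_summary_main
#
#     def pytest_addoption(parser):
#         pytest_addoption_shared(parser)
#
#     def pytest_terminal_summary(terminalreporter):
#         make_reports = terminalreporter.config.getoption("--make-reports")
#         if make_reports:
#             pytest_terminal_summary_main(terminalreporter, id=make_reports)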
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
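# Illustrative sketch: launching a short-lived Python subprocess through the async runner
# and inspecting its captured stdout (the printed text is arbitrary).
def _example_execute_subprocess_async_usage(env=None):
    cmd = [sys.executable, "-c", "print('worker ready')"]
    result = execute_subprocess_async(cmd, env=env, quiet=True, echo=False)
    assert any("worker ready" in line for line in result.stdout)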
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
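# Illustrative sketch (the script path is a hypothetical placeholder): building a
# distributed launch command whose master port is unique per pytest-xdist worker.
def _example_distributed_launch_cmd(num_gpus: int):
    return shlex.split(
        f"python -m torch.distributed.run --nproc_per_node={num_gpus} "
        f"--master_port={get_torch_dist_unique_port()} ./examples/run_something.py"
    )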
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
        # each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
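# Illustrative sketch: `to_2tuple` normalizes scalar-or-pair arguments such as image sizes
# (the values are arbitrary).
def _example_to_2tuple_usage():
    assert to_2tuple(224) == (224, 224)
    assert to_2tuple((224, 336)) == (224, 336)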
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
    if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
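# Illustrative sketch: running a trivial subprocess and capturing its stdout; the echoed
# string is arbitrary.
def _example_run_command_usage():
    out = run_command([sys.executable, "-c", "print('hello')"], return_stdout=True)
    assert "hello" in out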
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
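# Illustrative usage sketch: retrying a test that fails intermittently (the description and
# the test body are placeholders).
class _ExampleFlakyUsage(unittest.TestCase):
    @is_flaky(max_attempts=3, wait_before_retry=0.5, description="sporadic numerical jitter")
    def test_sometimes_noisy(self):
        self.assertTrue(True)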
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
    To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
    start_method = "spawn"
    ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
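# Illustrative sketch of the `target_func` contract assumed by `run_test_in_subprocess`:
# read the inputs from the input queue, do the work, then put a dict with an "error" key on
# the output queue (the "work" here is a placeholder).
def _example_subprocess_target(in_queue, out_queue, timeout):
    import traceback

    error = None
    try:
        inputs = in_queue.get(timeout=timeout)
        _ = inputs  # placeholder for the actual testing logic
    except Exception:
        error = f"{traceback.format_exc()}"
    out_queue.put({"error": error}, timeout=timeout)
    out_queue.join()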
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
    its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
    CUDA usage is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
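# Illustrative sketch: a code block that `preprocess_string` drops entirely when
# `skip_cuda_tests` is True because it matches the CUDA heuristic, and leaves untouched
# otherwise (the snippet content is arbitrary).
def _example_preprocess_string_usage():
    snippet = '```python\n>>> import torch\n>>> torch.ones(1).to("cuda")\n```'
    assert preprocess_string(snippet, skip_cuda_tests=True) == ""
    assert preprocess_string(snippet, skip_cuda_tests=False) == snippet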
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
    Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device-agnostic functions return values. Guard against a `None` entry here
# rather than forcing every caller to handle it.
if fn is None:
return None
return fn(*args, **kwargs)
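# Hedged illustration of the dispatch helper with a throwaway table; "mps" below is
# just an example of an unknown device key falling back to the "default" entry.
def _example_device_agnostic_dispatch_usage():  # pragma: no cover - illustrative helper
    table = {"cpu": lambda: 0, "cuda": None, "default": lambda: 1}
    cpu_value = _device_agnostic_dispatch("cpu", table)  # -> 0
    fallback = _device_agnostic_dispatch("mps", table)  # -> 1 (unknown key uses "default")
    guarded = _device_agnostic_dispatch("cuda", table)  # -> None (fn is None, guarded above)
    return cpu_value, fallback, guarded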
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
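# Hedged sketch of what a `TRANSFORMERS_TEST_DEVICE_SPEC` file could look like, based
# only on the attributes read above (`DEVICE_NAME` plus the optional `*_FN` entries);
# the device name "my_accelerator" and the file name are hypothetical:
#
#     # my_device_spec.py
#     import torch
#
#     DEVICE_NAME = "my_accelerator"
#     MANUAL_SEED_FN = torch.manual_seed
#     EMPTY_CACHE_FN = None
#     DEVICE_COUNT_FN = lambda: 1
#
# Running with `TRANSFORMERS_TEST_DEVICE_SPEC=my_device_spec.py` (and, if needed,
# `TRANSFORMERS_TEST_DEVICE=my_accelerator`) then routes the `backend_*` helpers
# above to these callables.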
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
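# Hedged usage sketch of the env-parsing helper above; the flag name mirrors the
# module-level usage further down, everything else is illustrative.
def _example_parse_flag_from_env_usage():  # pragma: no cover - illustrative helper
    # returns True for values like "yes"/"true"/"1", False for "no"/"false"/"0",
    # and the supplied default when the variable is unset
    return parse_flag_from_env("RUN_SLOW", default=False)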
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env("RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
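# Hedged usage note: assuming the `is_pt_tf_cross_test` marker is declared in the
# project's pytest configuration, these cross tests can be selected explicitly with
# something like:
#
#     RUN_PT_TF_CROSS_TESTS=1 python -m pytest -m is_pt_tf_cross_test tests/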
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. These tests are skipped unless RUN_AGENT_TESTS is set to a truthy value.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
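# Hedged illustration (never invoked by the library): how `slow` is meant to be
# applied in a test module; the class and test names are hypothetical.
def _example_slow_usage():  # pragma: no cover - illustrative helper
    class _ExampleIntegrationTest(unittest.TestCase):
        @slow
        def test_long_running_generation(self):
            # only executes when RUN_SLOW is set to a truthy value
            self.assertTrue(True)
    return _ExampleIntegrationTest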
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one or both are not installed.
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
test_case
)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
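# Hedged usage note: `get_tests_dir` resolves paths relative to the *calling* test
# file, so it is meant to be called from inside a test module, e.g. (hypothetical
# fixture path):
#
#     data_dir = get_tests_dir("fixtures")
#
# which yields ".../tests/fixtures" regardless of the directory pytest was launched
# from.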
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
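# Hedged illustration: progress-bar style output collapses to whatever follows the
# last carriage return, matching the comment above.
def _example_apply_print_resets_usage():  # pragma: no cover - illustrative helper
    buf = "downloading  10%\rdownloading 100%\rdone"
    return apply_print_resets(buf)  # -> "done"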
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporarily add the given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError("/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows writing:
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
    run_slow = os.getenv("RUN_SLOW", False)
    use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
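# Hedged usage sketch of the context-manager variant; the variable names are
# arbitrary examples.
def _example_mockenv_context_usage():  # pragma: no cover - illustrative helper
    with mockenv_context("SOME_VAR_TO_REMOVE", SOME_VAR_TO_SET="1"):
        # inside the block SOME_VAR_TO_REMOVE is unset and SOME_VAR_TO_SET == "1";
        # both changes are reverted on exit, even if the body raises
        assert os.environ.get("SOME_VAR_TO_SET") == "1"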
# --- pytest conf functions --- #
# to avoid multiple invocations from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
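# Hedged sketch of the `conftest.py` wrapper the docstring refers to (the exact conftest
# contents are an assumption):
#
#     # conftest.py
#     from transformers.testing_utils import pytest_addoption_shared
#
#     def pytest_addoption(parser):
#         pytest_addoption_shared(parser)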
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates the --durations and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and, while unlikely, it could break should pytest make internal
changes - it also calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
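# Hedged sketch of the matching `conftest.py` hook (an assumption based on the docstring
# above, not copied from an actual conftest):
#
#     def pytest_terminal_summary(terminalreporter):
#         make_reports = terminalreporter.config.getoption("--make-reports")
#         if make_reports:
#             pytest_terminal_summary_main(terminalreporter, id=make_reports)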
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
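# Illustrative usage sketch (assumption; `train.py` is a placeholder script name): launch a
# distributed run from a test and fail loudly if the subprocess errors or stays silent.
#
#     cmd = ["python", "-m", "torch.distributed.run", "--nproc_per_node=2", "train.py"]
#     result = execute_subprocess_async(cmd, env=self.get_env())
#     # result.stdout / result.stderr are lists of already-decoded lines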
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
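# Worked example (hedged): under `pytest -n 2`, worker "gw1" gives
# pytest_xdist_worker_id() == 1, so get_torch_dist_unique_port() returns 29500 + 1 = 29501,
# which keeps concurrent workers from competing for the same --master_port.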
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key on its own line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(line) - len(line.lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
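# Illustrative usage sketch (assumption): retry a test that intermittently fails because of
# an external service, pausing 2 seconds between attempts.
#
#     @is_flaky(max_attempts=3, wait_before_retry=2.0, description="hub connection resets")
#     def test_push_to_hub(self):
#         ...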
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_methohd = "spawn"
ctx = multiprocessing.get_context(start_methohd)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
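# Hedged sketch of a compatible `target_func` (names and body are illustrative assumptions;
# it must read from the input queue and report an "error" key via the output queue):
#
#     def _test_fn(in_queue, out_queue, timeout):
#         error = None
#         try:
#             inputs = in_queue.get(timeout=timeout)
#             ...  # the actual checks, run in a fresh process (e.g. to isolate GPU memory)
#         except Exception:
#             error = traceback.format_exc()  # assumes `import traceback`
#         out_queue.put({"error": error}, timeout=timeout)
#         out_queue.task_done()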
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA usage is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it proves insufficient.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# instead at user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
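# Illustrative example (hedged): with the default mappings above and torch installed,
# dispatch resolves by device name:
#
#     backend_manual_seed("cuda", 0)   # calls torch.cuda.manual_seed(0)
#     backend_device_count("cpu")      # -> 0
#     backend_empty_cache("cpu")       # -> None (the table entry is None, so it's a no-op)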
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError("Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
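# Hedged sketch of a device spec file pointed to by `TRANSFORMERS_TEST_DEVICE_SPEC`
# (file name and torch_npu-based functions are assumptions; only `DEVICE_NAME` plus the
# optional `*_FN` attributes are required by the logic above):
#
#     # my_device_spec.py
#     import torch
#
#     DEVICE_NAME = "npu"
#     MANUAL_SEED_FN = torch.npu.manual_seed
#     EMPTY_CACHE_FN = torch.npu.empty_cache
#     DEVICE_COUNT_FN = torch.npu.device_count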
| 2024-09-07 05:55:59.428860: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-09-07 05:55:59.443812: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
2024-09-07 05:55:59.459692: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
2024-09-07 05:55:59.464441: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-09-07 05:55:59.477118: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-09-07 05:56:00.550714: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
ImportError while loading conftest '/workspace/test_repo/conftest.py'.
conftest.py:26: in <module>
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
src/transformers/testing_utils.py:54: in <module>
from .utils import (
E ImportError: cannot import name 'GGUF_MIN_VERSION' from 'transformers.utils' (/workspace/test_repo/src/transformers/utils/__init__.py)
| {'total': 17, 'passed': 6, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 3, 'warning': 8, 'error': 0} | {'total': 0, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | ./test_repo/src/transformers/testing_utils.py | ./test_repo/examples/tensorflow/test_tensorflow_examples.py |
2_transformers_callee__testing_utils__parse_int_from_env__13deb95a405bbd1037ad233c692d7fd1de9d31e3 | callee | fail-to-pass | https://github.com/huggingface/transformers | 13deb95a405bbd1037ad233c692d7fd1de9d31e3 | function | parse_int_from_env | testing_utils.py | def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError("If set, {} must be a int.".format(key))
return _value
| def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
| import os
import unittest
from distutils.util import strtobool
from transformers.file_utils import _tf_available, _torch_available
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError("If set, {} must be yes or no.".format(key))
return _value
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable
to a truthy value to run them.
"""
if not _run_slow_tests:
test_case = unittest.skip("test is slow")(test_case)
return test_case
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped
by default. Set the RUN_CUSTOM_TOKENIZERS environment variable
to a truthy value to run them.
"""
if not _run_custom_tokenizers:
test_case = unittest.skip("test of custom tokenizers")(test_case)
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not _torch_available:
test_case = unittest.skip("test requires PyTorch")(test_case)
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow.
These tests are skipped when TensorFlow isn't installed.
"""
if not _tf_available:
test_case = unittest.skip("test requires TensorFlow")(test_case)
return test_case
def require_multigpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch).
These tests are skipped on a machine without multiple GPUs.
To run *only* the multigpu tests, assuming all test names contain multigpu:
$ pytest -sv ./tests -k "multigpu"
"""
if not _torch_available:
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
return test_case
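# Illustrative usage sketch (assumption, not part of the original context):
#
#     @require_multigpu
#     def test_model_parallel_forward(self):
#         ...  # only runs on machines with 2 or more GPUs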
if _torch_available:
# Set the USE_CUDA environment variable to select a GPU.
torch_device = "cuda" if parse_flag_from_env("USE_CUDA") else "cpu"
else:
torch_device = None
| import os
import unittest
from distutils.util import strtobool
from transformers.file_utils import _tf_available, _torch_available
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError("If set, {} must be yes or no.".format(key))
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError("If set, {} must be a int.".format(key))
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable
to a truthy value to run them.
"""
if not _run_slow_tests:
test_case = unittest.skip("test is slow")(test_case)
return test_case
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped
by default. Set the RUN_CUSTOM_TOKENIZERS environment variable
to a truthy value to run them.
"""
if not _run_custom_tokenizers:
test_case = unittest.skip("test of custom tokenizers")(test_case)
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not _torch_available:
test_case = unittest.skip("test requires PyTorch")(test_case)
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow.
These tests are skipped when TensorFlow isn't installed.
"""
if not _tf_available:
test_case = unittest.skip("test requires TensorFlow")(test_case)
return test_case
def require_multigpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch).
These tests are skipped on a machine without multiple GPUs.
To run *only* the multigpu tests, assuming all test names contain multigpu:
$ pytest -sv ./tests -k "multigpu"
"""
if not _torch_available:
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
return test_case
if _torch_available:
# Set the USE_CUDA environment variable to select a GPU.
torch_device = "cuda" if parse_flag_from_env("USE_CUDA") else "cpu"
else:
torch_device = None
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_TOOL_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
test_case
)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
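# Illustrative usage sketch (not part of the original module): the `require_*`
# helpers are ordinary decorators, so they can be applied directly to a test
# method; the class and method names below are hypothetical.
class _ExampleGpuTests(unittest.TestCase):
    @require_torch_gpu
    def test_runs_only_on_cuda(self):
        # Only executed when `torch_device == "cuda"`; skipped otherwise.
        self.assertTrue(torch.cuda.is_available())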
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(
torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(
torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
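# Illustrative sketch (not part of the original module): from a test living
# anywhere under `tests/`, resolve a fixture path regardless of the directory
# pytest was invoked from (the fixture file name below is hypothetical):
#
#     fixture = get_tests_dir("fixtures/dummy_config.json")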
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
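# Illustrative sketch (not part of the original module): `apply_print_resets`
# keeps only what remains after the last carriage return, i.e. the final message
# of a progress-bar style output.
def _example_apply_print_resets():
    buf = "downloading 10%\rdownloading 60%\rdone"
    assert apply_print_resets(buf) == "done"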
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this is not the desired behavior and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporarily add the given path to `sys.path`.
Usage:
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows this::
    @mockenv(RUN_SLOW=True, USE_TF=False)
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
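# Illustrative sketch (not part of the original module): temporarily set and
# remove environment variables; the original environment is restored on exit.
# The variable names below are only examples.
def _example_mockenv_context():
    with mockenv_context("SOME_VAR_TO_REMOVE", SOME_VAR_TO_SET="1"):
        assert os.environ.get("SOME_VAR_TO_SET") == "1"
        assert "SOME_VAR_TO_REMOVE" not in os.environ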
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and, while unlikely, it could break should pytest make internal
changes - it also calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
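# Example (sketch, not part of the original module): under `pytest -n 4`, worker
# `gw2` sees PYTEST_XDIST_WORKER="gw2", so pytest_xdist_worker_id() returns 2 and
# get_torch_dist_unique_port() returns 29500 + 2 = 29502.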
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
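# Illustrative sketch (not part of the original module): floats are rounded to
# `decimals` places while containers are simplified recursively.
def _example_nested_simplify():
    assert nested_simplify({"scores": [0.123456, 0.9876]}) == {"scores": [0.123, 0.988]}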
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
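# Illustrative sketch (not part of the original module): scalars are duplicated,
# while an existing iterable such as a (height, width) pair is returned as-is.
def _example_to_2tuple():
    assert to_2tuple(224) == (224, 224)
    assert to_2tuple((224, 336)) == (224, 336)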
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
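# Illustrative sketch (not part of the original module): run a small script and
# capture its stdout; a failing command raises SubprocessCallException with the
# combined output attached.
def _example_run_command():
    out = run_command([sys.executable, "-c", "print('ok')"], return_stdout=True)
    assert out.strip() == "ok"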
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
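# Illustrative usage sketch (not part of the original module): retry a flaky test
# up to 3 times, sleeping half a second between attempts; names are hypothetical.
class _ExampleFlakyTests(unittest.TestCase):
    @is_flaky(max_attempts=3, wait_before_retry=0.5)
    def test_occasionally_fails(self):
        ...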
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_method = "spawn"
ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA usage is detected (via a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some dispatch-table entries are `None` (a no-op for that device), so guard
# against `None` here instead of at the user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
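# Illustrative sketch (not part of the original module, assumes a torch install):
# seed and clean up whatever accelerator `torch_device` resolves to without
# branching on the backend by hand.
def _example_backend_helpers():
    backend_manual_seed(torch_device, 0)
    backend_empty_cache(torch_device)
    return backend_device_count(torch_device)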
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
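# Example (sketch, not part of the original module): with RUN_SLOW=yes exported,
# parse_flag_from_env("RUN_SLOW", default=False) returns True; `strtobool`
# accepts the usual yes/no/true/false/1/0 spellings.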
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env("RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default; they can be run exclusively by setting the RUN_PT_TF_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default; they can be run exclusively by setting the RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
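# Illustrative usage sketch (editor's addition, comments only; the test name is hypothetical).
# A slow test stays skipped in the default run and is enabled explicitly:
#
#     @slow
#     def test_full_model_integration(self):
#         ...
#
#     RUN_SLOW=yes python -m pytest ./tests/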
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
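# Illustrative usage sketch (editor's addition, comments only; the checkpoint name is hypothetical).
# With HF_HUB_READ_TOKEN exported, the decorator patches hub token resolution for the duration of
# the test so gated repositories can be loaded:
#
#     @require_read_token
#     def test_gated_checkpoint(self):
#         model = AutoModel.from_pretrained("org/some-gated-model")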
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one or both are not installed.
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision dependencies aren't
installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
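# Illustrative usage sketch (editor's addition, comments only; the test name is hypothetical).
# Multi-GPU tests are typically both marked and named so they can be selected with -k, as the
# docstring above suggests:
#
#     @require_torch_multi_gpu
#     def test_model_parallel_multi_gpu(self):
#         ...
#
#     python -m pytest -sv ./tests -k "multi_gpu"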
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator setup (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
test_case
)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires Cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
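# Illustrative usage sketch (editor's addition, comments only; the fixture path is hypothetical).
# Resolving test fixtures relative to the tests/ directory keeps a test runnable from any cwd:
#
#     SAMPLE_VOCAB = get_tests_dir("fixtures/sample_vocab.json")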
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be taken, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
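# Illustrative usage sketch (editor's addition, comments only). Progress-bar style output that
# overwrites itself with \r is normalized by apply_print_resets() inside CaptureStd, so only the
# final message needs to be asserted on:
#
#     with CaptureStdout() as cs:
#         print("step 1\r", end="")
#         print("training finished")
#     assert_screenout(cs.out, "training finished")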
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: `logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
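# Illustrative usage sketch (editor's addition, comments only; the test name is hypothetical).
# Inside a TestCasePlus subclass, temporary dirs follow the behaviors documented above:
#
#     def test_save_roundtrip(self):
#         tmp_dir = self.get_auto_remove_tmp_dir()           # unique dir, deleted after the test
#         debug_dir = self.get_auto_remove_tmp_dir("./xxx")  # fixed dir, emptied before, kept after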
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError("/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper, that allows this ::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
run_slow = os.getenv("RUN_SLOW", False)
use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
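# Illustrative usage sketch (editor's addition, comments only). Positional arguments name variables
# to remove, keyword arguments are added/updated, and everything is restored on exit:
#
#     with mockenv_context("HF_HOME", TRANSFORMERS_OFFLINE="1"):
#         ...  # code here sees TRANSFORMERS_OFFLINE=1 and no HF_HOME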
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and, while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
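# Illustrative usage sketch (editor's addition, comments only; the launcher script path is hypothetical).
# Inside a TestCasePlus test, the unique port keeps concurrent pytest-xdist workers from clashing:
#
#     cmd = f"torchrun --nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_script.py".split()
#     execute_subprocess_async(cmd, env=self.get_env())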
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
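# Illustrative usage sketch (editor's addition, comments only; `sentiment_pipeline` and the expected
# values are hypothetical). Rounding/downcasting makes nested outputs comparable with plain literals:
#
#     outputs = sentiment_pipeline("I love this!")
#     assert nested_simplify(outputs, decimals=3) == [{"label": "POSITIVE", "score": 1.0}]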
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# one key per line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
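# Illustrative usage sketch (editor's addition, comments only). Failures surface as a
# SubprocessCallException with the captured output attached:
#
#     stdout = run_command(["python", "-c", "print('ok')"], return_stdout=True)
#     assert "ok" in stdout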
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
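# Illustrative usage sketch (editor's addition, comments only; the test name is hypothetical):
#
#     @is_flaky(max_attempts=3, wait_before_retry=2.0, description="occasional hub timeouts")
#     def test_push_to_hub(self):
#         ...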
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
    start_method = "spawn"
    ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
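# Minimal sketch of a `target_func` compatible with `run_test_in_subprocess` (hypothetical;
# real target functions live in the individual test files). The child process reads its
# inputs from `in_queue`, reports a result dictionary that always carries an "error" key
# through `out_queue`, and waits for the parent to acknowledge it via `task_done`.
def _example_subprocess_target(in_queue, out_queue, timeout):
    results = {"error": None}
    try:
        inputs = in_queue.get(timeout=timeout)
        results["sum"] = sum(inputs["values"])  # stand-in for the actual GPU-heavy check
    except Exception as e:
        results["error"] = str(e)
    out_queue.put(results, timeout=timeout)
    out_queue.join()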
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` is the whole file content for a `.md` file, or one of its docstrings for a Python file. In
each case, it may contain multiple Python code examples. If `skip_cuda_tests` is `True` and CUDA-related code is
detected (with a heuristic), this method returns an empty string so no doctest is run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
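# Illustrative check of the behaviour described above (a sketch, not an exhaustive test):
# a code block that looks CUDA-bound empties the whole string when `skip_cuda_tests` is
# True, while the content is returned unchanged when CUDA snippets are allowed.
def _example_preprocess_string():
    cuda_snippet = "```python\n>>> import torch\n>>> x = torch.ones(1).to(0)\n```"
    assert preprocess_string(cuda_snippet, skip_cuda_tests=True) == ""
    assert preprocess_string(cuda_snippet, skip_cuda_tests=False) == cuda_snippet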
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving CUDA are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HfDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
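# Sketch (assumption, not the project's actual wiring): `HfDocTestParser` can also be used
# directly with the stdlib `doctest` machinery to run the examples of a single docstring
# while still honouring the CUDA-skip and `load_dataset` preprocessing performed above.
def _example_run_docstring_examples(docstring, globs=None):
    parser = HfDocTestParser()
    runner = doctest.DocTestRunner(verbose=False)
    test = parser.get_doctest(docstring, globs or {}, name="example", filename="<example>", lineno=0)
    runner.run(test)
    # `failures` / `tries` are standard `DocTestRunner` counters.
    return runner.failures, runner.tries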
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# instead at user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
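# Illustrative usage of the device-agnostic helpers above (assumes PyTorch is installed so
# that the CPU entries point at real functions): the same call site works for "cuda",
# "cpu", or any third-party device registered through a device spec file.
def _example_backend_helpers(device="cpu"):
    backend_manual_seed(device, 0)  # torch.manual_seed on CPU, torch.cuda.manual_seed on CUDA
    backend_empty_cache(device)  # no-op on CPU, torch.cuda.empty_cache on CUDA
    return backend_device_count(device)  # 0 on CPU, the number of visible GPUs on CUDA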
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError("Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
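# Hypothetical sketch of a device spec file consumed via `TRANSFORMERS_TEST_DEVICE_SPEC`
# (file name and backend module invented for illustration). It must define `DEVICE_NAME`
# and may define the `*_FN` attributes pulled in by `update_mapping_from_spec` above:
#
#     # spec_my_device.py
#     import my_backend  # hypothetical extension providing the device runtime
#
#     DEVICE_NAME = "my_device"
#     MANUAL_SEED_FN = my_backend.manual_seed
#     EMPTY_CACHE_FN = my_backend.empty_cache
#     DEVICE_COUNT_FN = my_backend.device_count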
| 2024-09-07 05:59:15.238069: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-09-07 05:59:15.252358: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
2024-09-07 05:59:15.269168: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
2024-09-07 05:59:15.274166: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-09-07 05:59:15.287173: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-09-07 05:59:16.386382: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
ImportError while loading conftest '/workspace/test_repo/conftest.py'.
conftest.py:26: in <module>
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
src/transformers/testing_utils.py:54: in <module>
from .utils import (
E ImportError: cannot import name 'GGUF_MIN_VERSION' from 'transformers.utils' (/workspace/test_repo/src/transformers/utils/__init__.py)
| {'total': 17, 'passed': 6, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 3, 'warning': 8, 'error': 0} | {'total': 0, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | ./test_repo/src/transformers/testing_utils.py | ./test_repo/examples/tensorflow/test_tensorflow_examples.py |
2_transformers_callee__testing_utils__is_pt_tf_cross_test__3a40cdf58d8b7233e561d1bf9e76f2ed02d9c4b7 | callee | fail-to-pass | https://github.com/huggingface/transformers | 3a40cdf58d8b7233e561d1bf9e76f2ed02d9c4b7 | function | is_pt_tf_cross_test | testing_utils.py | def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not _torch_available or not _tf_available:
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
| def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
| import inspect
import logging
import os
import re
import shutil
import sys
import tempfile
import unittest
from distutils.util import strtobool
from io import StringIO
from pathlib import Path
from .file_utils import (
_datasets_available,
_faiss_available,
_flax_available,
_sentencepiece_available,
_tf_available,
_tokenizers_available,
_torch_available,
_torch_tpu_available,
)
from .integrations import _has_optuna, _has_ray
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError("If set, {} must be yes or no.".format(key))
return _value
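# Illustrative sketch (hypothetical environment variable, for demonstration only): a set
# variable is parsed with `strtobool`, while an unset one falls back to `default`.
def _example_parse_flag_from_env():
    os.environ["MY_EXAMPLE_FLAG"] = "yes"
    try:
        assert parse_flag_from_env("MY_EXAMPLE_FLAG", default=False) == 1  # strtobool("yes") -> 1
        assert parse_flag_from_env("MY_EXAMPLE_FLAG_UNSET", default=True) is True
    finally:
        os.environ.pop("MY_EXAMPLE_FLAG", None)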
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError("If set, {} must be a int.".format(key))
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test.
Pipeline tests are skipped by default and we can run only them by setting RUN_PIPELINE_TEST environment variable
to a truthy value and selecting the is_pipeline_test pytest mark.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable
to a truthy value to run them.
"""
if not _run_slow_tests:
return unittest.skip("test is slow")(test_case)
else:
return test_case
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped
by default. Set the RUN_CUSTOM_TOKENIZERS environment variable
to a truthy value to run them.
"""
if not _run_custom_tokenizers:
return unittest.skip("test of custom tokenizers")(test_case)
else:
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not _torch_available:
return unittest.skip("test requires PyTorch")(test_case)
else:
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow.
These tests are skipped when TensorFlow isn't installed.
"""
if not _tf_available:
return unittest.skip("test requires TensorFlow")(test_case)
else:
return test_case
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax
These tests are skipped when one / both are not installed
"""
if not _flax_available:
test_case = unittest.skip("test requires JAX & Flax")(test_case)
return test_case
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece.
These tests are skipped when SentencePiece isn't installed.
"""
if not _sentencepiece_available:
return unittest.skip("test requires SentencePiece")(test_case)
else:
return test_case
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers.
These tests are skipped when 🤗 Tokenizers isn't installed.
"""
if not _tokenizers_available:
return unittest.skip("test requires tokenizers")(test_case)
else:
return test_case
def require_torch_multigpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch).
These tests are skipped on a machine without multiple GPUs.
To run *only* the multigpu tests, assuming all test names contain multigpu:
$ pytest -sv ./tests -k "multigpu"
"""
if not _torch_available:
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
else:
return test_case
def require_torch_non_multigpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not _torch_available:
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() > 1:
return unittest.skip("test requires 0 or 1 GPU")(test_case)
else:
return test_case
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
if not _torch_tpu_available:
return unittest.skip("test requires PyTorch TPU")
else:
return test_case
if _torch_available:
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
else:
torch_device = None
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch. """
if torch_device != "cuda":
return unittest.skip("test requires CUDA")(test_case)
else:
return test_case
def require_datasets(test_case):
"""Decorator marking a test that requires datasets."""
if not _datasets_available:
return unittest.skip("test requires `datasets`")(test_case)
else:
return test_case
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
if not _faiss_available:
return unittest.skip("test requires `faiss`")(test_case)
else:
return test_case
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
if not _has_optuna:
return unittest.skip("test requires optuna")(test_case)
else:
return test_case
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
if not _has_ray:
return unittest.skip("test requires Ray/tune")(test_case)
else:
return test_case
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere.
Optionally `append_path` is joined after the `tests` dir if it is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
class CaptureStd:
"""Context manager to capture:
stdout, clean it up and make it available via obj.out
stderr, and make it available via obj.err
init arguments:
- out - capture stdout: True/False, default True
- err - capture stderr: True/False, default True
Examples:
with CaptureStdout() as cs:
print("Secret message")
print(f"captured: {cs.out}")
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
print(f"captured: {cs.err}")
# to capture just one of the streams, but not the other
with CaptureStd(err=False) as cs:
print("Secret message")
print(f"captured: {cs.out}")
# but best use the stream-specific subclasses
"""
def __init__(self, out=True, err=True):
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
self.out = apply_print_resets(self.out_buf.getvalue())
if self.err_buf:
sys.stderr = self.err_old
self.err = self.err_buf.getvalue()
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
""" Same as CaptureStd but captures only stdout """
def __init__(self):
super().__init__(err=False)
class CaptureStderr(CaptureStd):
""" Same as CaptureStd but captures only stderr """
def __init__(self):
super().__init__(out=False)
class CaptureLogger:
"""Context manager to capture `logging` streams
Args:
- logger: 'logging` logger object
Results:
The captured output is available via `self.out`
Example:
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg+"\n"
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
class TestCasePlus(unittest.TestCase):
"""This class extends `unittest.TestCase` with additional features.
Feature 1: Flexible auto-removable temp dirs which are guaranteed to get
removed at the end of test.
In all the following scenarios the temp dir will be auto-removed at the end
of test, unless `after=False`.
# 1. create a unique temp dir, `tmp_dir` will contain the path to the created temp dir
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
# 2. create a temp dir of my choice and delete it at the end - useful for debug when you want to
# monitor a specific directory
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir(tmp_dir="./tmp/run/test")
# 3. create a temp dir of my choice and do not delete it at the end - useful for when you want
# to look at the temp results
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir(tmp_dir="./tmp/run/test", after=False)
# 4. create a temp dir of my choice and ensure to delete it right away - useful for when you
# disabled deletion in the previous test run and want to make sure the that tmp dir is empty
# before the new test is run
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir(tmp_dir="./tmp/run/test", before=True)
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the
project repository checkout are allowed if an explicit `tmp_dir` is used, so
that by mistake no `/tmp` or similar important part of the filesystem will
get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temp dirs and they all will get
auto-removed, unless requested otherwise.
"""
def setUp(self):
self.teardown_tmp_dirs = []
def get_auto_remove_tmp_dir(self, tmp_dir=None, after=True, before=False):
"""
Args:
tmp_dir (:obj:`string`, `optional`):
use this path, if None a unique path will be assigned
before (:obj:`bool`, `optional`, defaults to :obj:`False`):
if `True` and tmp dir already exists make sure to empty it right away
after (:obj:`bool`, `optional`, defaults to :obj:`True`):
delete the tmp dir at the end of the test
Returns:
tmp_dir(:obj:`string`):
either the same value as passed via `tmp_dir` or the path to the auto-created tmp dir
"""
if tmp_dir is not None:
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def tearDown(self):
# remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
def mockenv(**kwargs):
"""this is a convenience wrapper, that allows this:
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
run_slow = os.getenv("RUN_SLOW", False)
use_tf = os.getenv("USE_TF", False)
"""
return unittest.mock.patch.dict(os.environ, kwargs)
| import inspect
import logging
import os
import re
import shutil
import sys
import tempfile
import unittest
from distutils.util import strtobool
from io import StringIO
from pathlib import Path
from .file_utils import (
_datasets_available,
_faiss_available,
_flax_available,
_sentencepiece_available,
_tf_available,
_tokenizers_available,
_torch_available,
_torch_tpu_available,
)
from .integrations import _has_optuna, _has_ray
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError("If set, {} must be yes or no.".format(key))
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError("If set, {} must be a int.".format(key))
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not _torch_available or not _tf_available:
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test.
Pipeline tests are skipped by default and we can run only them by setting RUN_PIPELINE_TEST environment variable
to a truthy value and selecting the is_pipeline_test pytest mark.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable
to a truthy value to run them.
"""
if not _run_slow_tests:
return unittest.skip("test is slow")(test_case)
else:
return test_case
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped
by default. Set the RUN_CUSTOM_TOKENIZERS environment variable
to a truthy value to run them.
"""
if not _run_custom_tokenizers:
return unittest.skip("test of custom tokenizers")(test_case)
else:
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not _torch_available:
return unittest.skip("test requires PyTorch")(test_case)
else:
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow.
These tests are skipped when TensorFlow isn't installed.
"""
if not _tf_available:
return unittest.skip("test requires TensorFlow")(test_case)
else:
return test_case
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax
These tests are skipped when one / both are not installed
"""
if not _flax_available:
test_case = unittest.skip("test requires JAX & Flax")(test_case)
return test_case
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece.
These tests are skipped when SentencePiece isn't installed.
"""
if not _sentencepiece_available:
return unittest.skip("test requires SentencePiece")(test_case)
else:
return test_case
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers.
These tests are skipped when 🤗 Tokenizers isn't installed.
"""
if not _tokenizers_available:
return unittest.skip("test requires tokenizers")(test_case)
else:
return test_case
def require_torch_multigpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch).
These tests are skipped on a machine without multiple GPUs.
To run *only* the multigpu tests, assuming all test names contain multigpu:
$ pytest -sv ./tests -k "multigpu"
"""
if not _torch_available:
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
else:
return test_case
def require_torch_non_multigpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not _torch_available:
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() > 1:
return unittest.skip("test requires 0 or 1 GPU")(test_case)
else:
return test_case
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
if not _torch_tpu_available:
return unittest.skip("test requires PyTorch TPU")
else:
return test_case
if _torch_available:
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
else:
torch_device = None
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch. """
if torch_device != "cuda":
return unittest.skip("test requires CUDA")(test_case)
else:
return test_case
def require_datasets(test_case):
"""Decorator marking a test that requires datasets."""
if not _datasets_available:
return unittest.skip("test requires `datasets`")(test_case)
else:
return test_case
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
if not _faiss_available:
return unittest.skip("test requires `faiss`")(test_case)
else:
return test_case
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
if not _has_optuna:
return unittest.skip("test requires optuna")(test_case)
else:
return test_case
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
if not _has_ray:
return unittest.skip("test requires Ray/tune")(test_case)
else:
return test_case
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere.
Optionally `append_path` is joined after the `tests` dir if it is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
class CaptureStd:
"""Context manager to capture:
stdout, clean it up and make it available via obj.out
stderr, and make it available via obj.err
init arguments:
- out - capture stdout: True/False, default True
- err - capture stderr: True/False, default True
Examples:
with CaptureStdout() as cs:
print("Secret message")
print(f"captured: {cs.out}")
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
print(f"captured: {cs.err}")
# to capture just one of the streams, but not the other
with CaptureStd(err=False) as cs:
print("Secret message")
print(f"captured: {cs.out}")
# but best use the stream-specific subclasses
"""
def __init__(self, out=True, err=True):
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
self.out = apply_print_resets(self.out_buf.getvalue())
if self.err_buf:
sys.stderr = self.err_old
self.err = self.err_buf.getvalue()
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
""" Same as CaptureStd but captures only stdout """
def __init__(self):
super().__init__(err=False)
class CaptureStderr(CaptureStd):
""" Same as CaptureStd but captures only stderr """
def __init__(self):
super().__init__(out=False)
class CaptureLogger:
"""Context manager to capture `logging` streams
Args:
- logger: 'logging` logger object
Results:
The captured output is available via `self.out`
Example:
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg+"\n"
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
class TestCasePlus(unittest.TestCase):
"""This class extends `unittest.TestCase` with additional features.
Feature 1: Flexible auto-removable temp dirs which are guaranteed to get
removed at the end of test.
In all the following scenarios the temp dir will be auto-removed at the end
of test, unless `after=False`.
# 1. create a unique temp dir, `tmp_dir` will contain the path to the created temp dir
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
# 2. create a temp dir of my choice and delete it at the end - useful for debug when you want to
# monitor a specific directory
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir(tmp_dir="./tmp/run/test")
# 3. create a temp dir of my choice and do not delete it at the end - useful for when you want
# to look at the temp results
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir(tmp_dir="./tmp/run/test", after=False)
# 4. create a temp dir of my choice and ensure to delete it right away - useful for when you
# disabled deletion in the previous test run and want to make sure the that tmp dir is empty
# before the new test is run
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir(tmp_dir="./tmp/run/test", before=True)
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the
project repository checkout are allowed if an explicit `tmp_dir` is used, so
that by mistake no `/tmp` or similar important part of the filesystem will
get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temp dirs and they all will get
auto-removed, unless requested otherwise.
"""
def setUp(self):
self.teardown_tmp_dirs = []
def get_auto_remove_tmp_dir(self, tmp_dir=None, after=True, before=False):
"""
Args:
tmp_dir (:obj:`string`, `optional`):
use this path, if None a unique path will be assigned
before (:obj:`bool`, `optional`, defaults to :obj:`False`):
if `True` and tmp dir already exists make sure to empty it right away
after (:obj:`bool`, `optional`, defaults to :obj:`True`):
delete the tmp dir at the end of the test
Returns:
tmp_dir(:obj:`string`):
either the same value as passed via `tmp_dir` or the path to the auto-created tmp dir
"""
if tmp_dir is not None:
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def tearDown(self):
# remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
def mockenv(**kwargs):
"""this is a convenience wrapper, that allows this:
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
run_slow = os.getenv("RUN_SLOW", False)
use_tf = os.getenv("USE_TF", False)
"""
return unittest.mock.patch.dict(os.environ, kwargs)
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_TOOL_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
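# Illustrative usage sketch (the model id below is a placeholder, not a real gated
# repository): `require_read_token` patches the hub token resolution so that gated
# checkpoints can be fetched on CI machines where `HF_HUB_READ_TOKEN` is configured.
@require_read_token
def _example_gated_model_check():
    from transformers import AutoConfig  # local import keeps the sketch self-contained

    return AutoConfig.from_pretrained("some-org/some-gated-model")  # placeholder id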
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
    These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
    These tests are skipped when Intel Extension for PyTorch isn't installed or its version does not match the
    current PyTorch version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
    Decorator marking a test that requires JAX & Flax. These tests are skipped when one or both are not installed.
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
    Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
    Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
    Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
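# Editor's illustrative sketch (not part of the original module): `require_*` decorators are
# ordinary unittest skip decorators, so hardware and dependency requirements can be stacked on
# a single (hypothetical) test method.
def _example_stacked_requirements():
    @require_torch_multi_gpu
    @require_sentencepiece
    def test_model_data_parallel_forward(self):
        ...
    return test_model_data_parallel_forward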
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
    return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
        test_case
    )
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
    must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
    return unittest.skipUnless(
        is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
    )(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
    return unittest.skipUnless(
        is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
    )(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
    Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
    return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
    Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
    assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this is a not wanted behavior and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
    >>> assert cl.out == msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
                If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
                `tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
                If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its
                contents intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
    This is a convenience wrapper that allows writing:

        @mockenv(RUN_SLOW=True, USE_TF=False)
        def test_something():
            run_slow = os.getenv("RUN_SLOW", False)
            use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
    Temporarily updates the `os.environ` dictionary in-place. Similar to `mockenv`.
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
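# Editor's illustrative sketch (not part of the original module): unlike `mockenv`,
# `mockenv_context` mutates `os.environ` in place for the duration of the `with` block, so the
# change is also visible to child processes spawned inside it. The variable names are arbitrary.
def _example_mockenv_context_usage():
    with mockenv_context("SOME_VAR_TO_REMOVE", TRANSFORMERS_VERBOSITY="error"):
        assert os.environ.get("TRANSFORMERS_VERBOSITY") == "error"
        assert "SOME_VAR_TO_REMOVE" not in os.environ
    # on exit, previous values (or their absence) are restored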
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this functions taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
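# Editor's illustrative sketch (not part of the original module): running an external command
# from a `TestCasePlus` test with `PYTHONPATH` prepared by `get_env()`, then asserting on the
# captured stdout lines of the returned `_RunOutput`.
def _example_execute_subprocess_async_usage(test_case):
    cmd = [sys.executable, "-c", "print('hello from the child process')"]
    result = execute_subprocess_async(cmd, env=test_case.get_env())
    assert "hello from the child process" in "\n".join(result.stdout)
    return result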
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
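# Editor's illustrative sketch (not part of the original module): interpolating the unique port
# into a distributed launcher command so concurrent pytest-xdist workers don't fight over the
# same master port. The training script path is hypothetical.
def _example_distributed_launch_cmd():
    master_port = get_torch_dist_unique_port()  # 29500 + pytest-xdist worker id
    cmd = (
        f"{sys.executable} -m torch.distributed.run --nproc_per_node=2 "
        f"--master_port={master_port} examples/pytorch/hypothetical_training_script.py"
    ).split()
    return cmd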
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
        # each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
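# Editor's illustrative sketch (not part of the original module): the layout accepted by
# `check_json_file_has_correct_format` - either an empty `{}` or a 2-space-indented object, as
# produced by `json.dump(..., indent=2)`. The key name is hypothetical.
def _example_check_json_file_format():
    import json
    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, "config.json")
        with open(path, "w") as f:
            json.dump({"hidden_size": 32}, f, indent=2)
        check_json_file_has_correct_format(path)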
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
    if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
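# Editor's illustrative sketch (not part of the original module): `run_command` raises
# `SubprocessCallException` carrying the captured output when the command fails, which makes it
# easy to assert on a script's error message.
def _example_run_command_usage():
    out = run_command([sys.executable, "-c", "print('ok')"], return_stdout=True)
    assert "ok" in out
    try:
        run_command([sys.executable, "-c", "raise SystemExit('boom')"])
    except SubprocessCallException as e:
        assert "boom" in str(e)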
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
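# Editor's illustrative sketch (not part of the original module): marking a (hypothetical)
# intermittently failing test so it is retried a few times before being reported as a failure.
def _example_is_flaky_usage():
    @is_flaky(max_attempts=3, wait_before_retry=1.0, description="hub connection occasionally times out")
    def test_push_to_hub_roundtrip(self):
        ...
    return test_push_to_hub_roundtrip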
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
    start_method = "spawn"
    ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
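# Editor's illustrative sketch (not part of the original module): the shape `target_func` is
# expected to have when used with `run_test_in_subprocess` - read the inputs from the input
# queue, always put a dict with an "error" key on the output queue, then join it so the parent
# can finish reading before the child exits. The actual check performed is hypothetical.
def _example_subprocess_target_func(in_queue, out_queue, timeout):
    import traceback
    error = None
    try:
        inputs = in_queue.get(timeout=timeout)
        _ = inputs  # run the real (hypothetical) GPU-heavy assertions on `inputs` here
    except Exception:
        error = f"{traceback.format_exc()}"
    out_queue.put({"error": error}, timeout=timeout)
    out_queue.join()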
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
    its docstring. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
    CUDA-related code is detected (via a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
    Tests involving cuda are skipped based on a naive pattern that should be updated if it proves insufficient.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
                Doctest code does not take into account `@property`; this
                is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values; guard against `None` here so
# callers don't have to.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
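# Illustrative sketch (not part of the original utilities): how the device agnostic
# helpers above are meant to be called from tests, assuming torch is available so the
# backend tables contain real callables. Only a definition; the device string and seed
# are hypothetical.
def _example_backend_helpers_usage(device: str = "cpu"):
    # Seed whichever backend `device` maps to; unknown devices fall back to the
    # "default" entry of the dispatch table.
    backend_manual_seed(device, 0)
    # For CPU the empty-cache entry is None, so the dispatcher returns None (guarded no-op).
    backend_empty_cache(device)
    # Returns 0 for CPU, the CUDA device count when `device` is "cuda".
    return backend_device_count(device)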
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip the extension for later import; this also verifies we are
# importing a Python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
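# Illustrative sketch (assumption, not part of the original file): the shape of a minimal
# device spec module that `TRANSFORMERS_TEST_DEVICE_SPEC` could point at. The file name and
# the particular function choices below are hypothetical; only `DEVICE_NAME` is mandatory,
# while the `*_FN` attributes are picked up by the `update_mapping_from_spec` calls above
# when present (otherwise the "default" entries remain in effect).
#
#     # my_device_spec.py (hypothetical)
#     import torch
#
#     DEVICE_NAME = "mydevice"
#     MANUAL_SEED_FN = torch.manual_seed
#     EMPTY_CACHE_FN = None
#     DEVICE_COUNT_FN = lambda: 1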
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
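# Illustrative sketch (not part of the original utilities): deriving extra module-level
# switches with the env parsing helpers above. The environment variable names below are
# hypothetical.
def _example_parse_env_switches():
    run_extra_tests = parse_flag_from_env("RUN_EXTRA_TESTS", default=False)
    extra_gpu_memory_limit = parse_int_from_env("EXTRA_GPU_MEMORY_LIMIT", default=None)
    return run_extra_tests, extra_gpu_memory_limit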
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env("RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
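# Illustrative sketch (not part of the original utilities): `require_read_token` wraps any
# callable, not only unittest methods. The inner function below is hypothetical and is only
# defined here, never run at import time.
def _example_require_read_token_usage():
    @require_read_token
    def _load_gated_checkpoint():
        # With HF_HUB_READ_TOKEN set, hub token lookups inside this call are patched to
        # return that token; otherwise the call runs unchanged.
        return None
    return _load_gated_checkpoint()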
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one or both are not installed.
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision dependencies aren't
installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
test_case
)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires Cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
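# Illustrative sketch (not part of the original helpers): what `apply_print_resets` does to
# progress-bar style output. Only a definition; the buffer content below is hypothetical.
def _example_apply_print_resets():
    buf = "downloading 10%\rdownloading 99%\rfinal message\n"
    # Everything up to the last carriage return on each line is dropped, leaving "final message\n".
    return apply_print_resets(buf)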
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporarily add the given path to `sys.path`.
Usage:
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError("/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
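# Illustrative sketch (not part of the original class): a hypothetical helper showing how a
# TestCasePlus-based test combines the auto-removed tmp dir with the PYTHONPATH-aware
# environment when launching an external process. Only a definition, never called here.
def _example_test_case_plus_usage(test_case: TestCasePlus):
    tmp_dir = test_case.get_auto_remove_tmp_dir()  # registered for removal in tearDown()
    env = test_case.get_env()  # ./src first on PYTHONPATH, then ./tests or ./examples
    cmd = [sys.executable, "-c", "import transformers"]
    return subprocess.run(cmd, env=env, cwd=tmp_dir, check=True)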
def mockenv(**kwargs):
"""
This is a convenience wrapper that allows this::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
    run_slow = os.getenv("RUN_SLOW", False)
    use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to `mockenv`.
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
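# Illustrative sketch (not part of the original helpers): using `mockenv_context` to add one
# variable and remove another for the duration of the block; the names and values below are
# hypothetical. Only a definition, never called at import time.
def _example_mockenv_context_usage():
    with mockenv_context("SOME_VAR_TO_REMOVE", HF_EXAMPLE_FLAG="1"):
        assert os.environ.get("HF_EXAMPLE_FLAG") == "1"
        assert "SOME_VAR_TO_REMOVE" not in os.environ
        # On exit the original values (or absence) of both variables are restored.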
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
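# Illustrative sketch (assumption, not part of the original helpers): the wrapper a
# conftest.py is expected to define so the shared option gets registered exactly once.
def _example_conftest_pytest_addoption(parser):
    # In a real conftest.py this function would simply be named `pytest_addoption`
    # so that pytest picks it up as a hook.
    pytest_addoption_shared(parser)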
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
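# Sketch of the matching `conftest.py` wrapper (hypothetical file content shown as a comment):
# the reports are only generated when `--make-reports=<prefix>` is passed to pytest.
#
#     # conftest.py
#     from transformers.testing_utils import pytest_terminal_summary_main
#
#     def pytest_terminal_summary(terminalreporter):
#         make_reports = terminalreporter.config.getoption("--make-reports")
#         if make_reports:
#             pytest_terminal_summary_main(terminalreporter, id=make_reports)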
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
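# Minimal usage sketch (illustrative only): run a throwaway Python one-liner and inspect the
# captured stdout lines; `sys` and `os` are already imported at module level.
def _example_execute_subprocess_async():
    cmd = [sys.executable, "-c", "print('hello from subprocess')"]
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    assert result.returncode == 0
    assert "hello from subprocess" in "\n".join(result.stdout)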
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
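# Minimal usage sketch (illustrative only): the unique port is typically interpolated into a
# `torch.distributed.run` command line before handing it to `execute_subprocess_async`.
# The script name below is hypothetical.
def _example_distributed_launch_cmd():
    master_port = get_torch_dist_unique_port()
    cmd = (
        f"python -m torch.distributed.run --nproc_per_node=2 "
        f"--master_port={master_port} my_training_script.py"
    )
    return cmd.split()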
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# one key per line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
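# Minimal usage sketch (illustrative only): scalars are duplicated, iterables pass through untouched.
def _example_to_2tuple():
    assert to_2tuple(7) == (7, 7)
    assert to_2tuple((224, 224)) == (224, 224)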
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
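# Minimal usage sketch (illustrative only): a failing command surfaces its output through
# `SubprocessCallException` instead of a bare `CalledProcessError`.
def _example_run_command():
    out = run_command([sys.executable, "-c", "print('ok')"], return_stdout=True)
    assert out.strip() == "ok"
    try:
        run_command([sys.executable, "-c", "raise SystemExit(1)"])
    except SubprocessCallException as e:
        assert "failed with the following error" in str(e)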
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
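# Minimal usage sketch (illustrative only): the decorated test is re-run until it passes or
# `max_attempts` is exhausted; the flakiness below is simulated with a call counter.
def _example_is_flaky():
    calls = {"n": 0}

    @is_flaky(max_attempts=3, wait_before_retry=0)
    def sometimes_failing_test():
        calls["n"] += 1
        if calls["n"] < 2:
            raise AssertionError("simulated flaky failure")
        return "passed"

    assert sometimes_failing_test() == "passed"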
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_method = "spawn"
ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA usage is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
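# Minimal usage sketch (illustrative only): a markdown snippet with a `load_dataset` call gains
# the IGNORE_RESULT marker, and a CUDA-looking snippet is dropped entirely when
# `skip_cuda_tests` is True.
def _example_preprocess_string():
    snippet = (
        "```python\n"
        ">>> from datasets import load_dataset\n"
        '>>> ds = load_dataset("dummy")\n'
        "```"
    )
    assert "# doctest: +IGNORE_RESULT" in preprocess_string(snippet, skip_cuda_tests=False)

    cuda_snippet = (
        "```python\n"
        ">>> import torch\n"
        '>>> model.to("cuda")\n'
        "```"
    )
    assert preprocess_string(cuda_snippet, skip_cuda_tests=True) == ""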
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values, while others are mapped to `None` because there is nothing to
# call for that backend; guard against `None` here instead of at the user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
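# Minimal usage sketch (illustrative only, assuming PyTorch is installed so the "cuda"/"cpu"
# entries above are populated): test code calls the wrappers with `torch_device` or an explicit
# backend name, and the dispatch table decides what, if anything, runs.
def _example_device_agnostic_calls():
    backend_manual_seed("cpu", 42)  # dispatches to torch.manual_seed
    backend_empty_cache("cpu")  # mapped to None for cpu, so this is a no-op
    return backend_device_count("cpu")  # 0 for cpu in the default mapping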
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into the device-to-function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError("Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
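# Sketch of a device spec file referenced by `TRANSFORMERS_TEST_DEVICE_SPEC` (hypothetical
# content shown as a comment): only `DEVICE_NAME` is mandatory, the `*_FN` attributes are
# picked up by `update_mapping_from_spec` above when present.
#
#     # my_device_spec.py
#     DEVICE_NAME = "npu"
#     MANUAL_SEED_FN = lambda seed: None  # seed the accelerator here
#     EMPTY_CACHE_FN = lambda: None       # free cached memory here
#     DEVICE_COUNT_FN = lambda: 1         # report how many devices exist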
| 2024-09-07 06:02:34.905572: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-09-07 06:02:34.921037: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
2024-09-07 06:02:34.937214: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
2024-09-07 06:02:34.942057: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-09-07 06:02:34.955509: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-09-07 06:02:36.384019: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
ImportError while loading conftest '/workspace/test_repo/conftest.py'.
conftest.py:26: in <module>
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
src/transformers/testing_utils.py:54: in <module>
from .utils import (
E ImportError: cannot import name 'GGUF_MIN_VERSION' from 'transformers.utils' (/workspace/test_repo/src/transformers/utils/__init__.py)
| {'total': 17, 'passed': 6, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 3, 'warning': 8, 'error': 0} | {'total': 0, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | ./test_repo/src/transformers/testing_utils.py | ./test_repo/examples/tensorflow/test_tensorflow_examples.py |
2_transformers_callee__testing_utils__is_pt_flax_cross_test__9f8619c6aa1f991dacbd8aba4f82b5f52f690f1f | callee | fail-to-pass | https://github.com/huggingface/transformers | 9f8619c6aa1f991dacbd8aba4f82b5f52f690f1f | function | is_pt_flax_cross_test | testing_utils.py | def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip("test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
| def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import inspect
import logging
import os
import re
import shutil
import sys
import tempfile
import unittest
from distutils.util import strtobool
from io import StringIO
from pathlib import Path
from .file_utils import (
is_datasets_available,
is_faiss_available,
is_flax_available,
is_onnx_available,
is_pandas_available,
is_scatter_available,
is_sentencepiece_available,
is_soundfile_availble,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_torch_tpu_available,
is_torchaudio_available,
)
from .integrations import is_optuna_available, is_ray_available
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError("If set, {} must be yes or no.".format(key))
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError("If set, {} must be a int.".format(key))
return _value
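# Minimal usage sketch (illustrative only): both helpers read a single environment variable and
# fall back to the given default when it is unset. The variable names below are hypothetical.
def _example_parse_env_helpers():
    os.environ["MY_FAKE_FLAG"] = "yes"
    assert parse_flag_from_env("MY_FAKE_FLAG", default=False)  # strtobool -> truthy
    assert parse_int_from_env("MY_FAKE_LIMIT", default=4) == 4  # unset -> default
    del os.environ["MY_FAKE_FLAG"]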
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=False)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False)
_run_git_lfs_tests = parse_flag_from_env("RUN_GIT_LFS_TESTS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test.
Pipeline tests are skipped by default and we can run only them by setting RUN_PIPELINE_TESTS environment variable
to a truthy value and selecting the is_pipeline_test pytest mark.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
if not _run_slow_tests:
return unittest.skip("test is slow")(test_case)
else:
return test_case
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
if not _run_custom_tokenizers:
return unittest.skip("test of custom tokenizers")(test_case)
else:
return test_case
def require_git_lfs(test_case):
"""
Decorator marking a test that requires git-lfs.
git-lfs requires additional dependencies, and tests are skipped by default. Set the RUN_GIT_LFS_TESTS environment
variable to a truthy value to run them.
"""
if not _run_git_lfs_tests:
return unittest.skip("test of git lfs workflow")(test_case)
else:
return test_case
def require_onnx(test_case):
if not is_onnx_available():
return unittest.skip("test requires ONNX")(test_case)
else:
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
else:
return test_case
def require_torch_scatter(test_case):
"""
Decorator marking a test that requires PyTorch scatter.
These tests are skipped when PyTorch scatter isn't installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch scatter")(test_case)
else:
return test_case
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio.
These tests are skipped when torchaudio isn't installed.
"""
if not is_torchaudio_available():
return unittest.skip("test requires torchaudio")(test_case)
else:
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow.
These tests are skipped when TensorFlow isn't installed.
"""
if not is_tf_available():
return unittest.skip("test requires TensorFlow")(test_case)
else:
return test_case
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax
These tests are skipped when one / both are not installed
"""
if not is_flax_available():
test_case = unittest.skip("test requires JAX & Flax")(test_case)
return test_case
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece.
These tests are skipped when SentencePiece isn't installed.
"""
if not is_sentencepiece_available():
return unittest.skip("test requires SentencePiece")(test_case)
else:
return test_case
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers.
These tests are skipped when 🤗 Tokenizers isn't installed.
"""
if not is_tokenizers_available():
return unittest.skip("test requires tokenizers")(test_case)
else:
return test_case
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
if not is_pandas_available():
return unittest.skip("test requires pandas")(test_case)
else:
return test_case
def require_scatter(test_case):
"""
Decorator marking a test that requires PyTorch Scatter. These tests are skipped when PyTorch Scatter isn't
installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch Scatter")(test_case)
else:
return test_case
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch).
These tests are skipped on a machine without multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
else:
return test_case
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() > 1:
return unittest.skip("test requires 0 or 1 GPU")(test_case)
else:
return test_case
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
if not is_torch_tpu_available():
return unittest.skip("test requires PyTorch TPU")
else:
return test_case
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
else:
torch_device = None
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch. """
if torch_device != "cuda":
return unittest.skip("test requires CUDA")(test_case)
else:
return test_case
def require_datasets(test_case):
"""Decorator marking a test that requires datasets."""
if not is_datasets_available():
return unittest.skip("test requires `datasets`")(test_case)
else:
return test_case
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
if not is_faiss_available():
return unittest.skip("test requires `faiss`")(test_case)
else:
return test_case
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
if not is_optuna_available():
return unittest.skip("test requires optuna")(test_case)
else:
return test_case
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
if not is_ray_available():
return unittest.skip("test requires Ray/tune")(test_case)
else:
return test_case
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
if not is_soundfile_availble():
return unittest.skip("test requires soundfile")(test_case)
else:
return test_case
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch or tf is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
class CaptureStd:
"""
Context manager to capture:
stdout, clean it up and make it available via obj.out
stderr, and make it available via obj.err
init arguments:
- out - capture stdout: True/False, default True
- err - capture stderr: True/False, default True
Examples::
with CaptureStdout() as cs:
print("Secret message")
print(f"captured: {cs.out}")
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
print(f"captured: {cs.err}")
# to capture just one of the streams, but not the other
with CaptureStd(err=False) as cs:
print("Secret message")
print(f"captured: {cs.out}")
# but best use the stream-specific subclasses
"""
def __init__(self, out=True, err=True):
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
self.out = apply_print_resets(self.out_buf.getvalue())
if self.err_buf:
sys.stderr = self.err_old
self.err = self.err_buf.getvalue()
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
""" Same as CaptureStd but captures only stdout """
def __init__(self):
super().__init__(err=False)
class CaptureStderr(CaptureStd):
""" Same as CaptureStd but captures only stderr """
def __init__(self):
super().__init__(out=False)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
- logger: 'logging` logger object
Results:
The captured output is available via `self.out`
Example::
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg+"\n"
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
class TestCasePlus(unittest.TestCase):
"""
This class extends `unittest.TestCase` with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
* ``pathlib`` objects (all fully resolved):
- ``test_file_path`` - the current test file path (=``__file__``)
- ``test_file_dir`` - the directory containing the current test file
- ``tests_dir`` - the directory of the ``tests`` test suite
- ``examples_dir`` - the directory of the ``examples`` test suite
- ``repo_root_dir`` - the directory of the repository
- ``src_dir`` - the directory of ``src`` (i.e. where the ``transformers`` sub-dir resides)
* stringified paths---same as above but these return paths as strings, rather than ``pathlib`` objects:
- ``test_file_path_str``
- ``test_file_dir_str``
- ``tests_dir_str``
- ``examples_dir_str``
- ``repo_root_dir_str``
- ``src_dir_str``
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
::
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
``tmp_dir`` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
::
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the ``before`` and ``after`` args, leading to the
following behavior:
``before=True``: the temporary dir will always be cleared at the beginning of the test.
``before=False``: if the temporary dir already existed, any existing files will remain there.
``after=True``: the temporary dir will always be deleted at the end of the test.
``after=False``: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of ``rm -r`` safely, only subdirs of the project repository checkout are
allowed if an explicit ``tmp_dir`` is used, so that by mistake no ``/tmp`` or similar important part of the
filesystem will get nuked. i.e. please always pass paths that start with ``./``
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` specific to the current test suite.
This is useful for invoking external programs from the test suite - e.g. distributed training.
::
def test_whatever(self):
env = self.get_env()
"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` correctly, depending on the test suite
it's invoked from. This is useful for invoking external programs from the test suite - e.g. distributed
training.
It always inserts ``./src`` first, then ``./tests`` or ``./examples`` depending on the test suite type and
finally the preset ``PYTHONPATH`` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (:obj:`string`, `optional`):
if :obj:`None`:
- a unique temporary path will be created
- sets ``before=True`` if ``before`` is :obj:`None`
- sets ``after=True`` if ``after`` is :obj:`None`
else:
- :obj:`tmp_dir` will be created
- sets ``before=True`` if ``before`` is :obj:`None`
- sets ``after=False`` if ``after`` is :obj:`None`
before (:obj:`bool`, `optional`):
If :obj:`True` and the :obj:`tmp_dir` already exists, make sure to empty it right away; if :obj:`False`
and the :obj:`tmp_dir` already exists, any existing files will remain there.
after (:obj:`bool`, `optional`):
If :obj:`True`, delete the :obj:`tmp_dir` at the end of the test; if :obj:`False`, leave the
:obj:`tmp_dir` and its contents intact at the end of the test.
Returns:
tmp_dir(:obj:`string`): either the same value as passed via `tmp_dir` or the path to the auto-selected tmp
dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
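# Minimal usage sketch (illustrative only, assuming the test file lives inside the repository
# checkout so `setUp` can locate the repo root): inheriting from TestCasePlus provides
# auto-removed temp dirs and a PYTHONPATH-aware environment for subprocess calls.
class _ExampleTestCasePlusUsage(TestCasePlus):
    def test_tmp_dir_and_env(self):
        tmp_dir = self.get_auto_remove_tmp_dir()  # removed automatically in tearDown
        self.assertTrue(os.path.isdir(tmp_dir))
        env = self.get_env()  # PYTHONPATH includes ./src plus the test suite dir
        self.assertIn("PYTHONPATH", env)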
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows this ::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
run_slow = os.getenv("RUN_SLOW", False)
use_tf = os.getenv("USE_TF", False)
"""
return unittest.mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the ``os.environ`` dictionary in-place. Similar to mockenv
The ``os.environ`` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and, while unlikely, it could break should
pytest do internal changes - also it calls default internal methods of terminalreporter which
can be hijacked by various `pytest-` plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = "reports"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{id}_{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
with open(report_files["passes"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import inspect
import logging
import os
import re
import shutil
import sys
import tempfile
import unittest
from distutils.util import strtobool
from io import StringIO
from pathlib import Path
from .file_utils import (
is_datasets_available,
is_faiss_available,
is_flax_available,
is_onnx_available,
is_pandas_available,
is_scatter_available,
is_sentencepiece_available,
is_soundfile_availble,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_torch_tpu_available,
is_torchaudio_available,
)
from .integrations import is_optuna_available, is_ray_available
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError("If set, {} must be yes or no.".format(key))
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError("If set, {} must be an int.".format(key))
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=False)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False)
_run_git_lfs_tests = parse_flag_from_env("RUN_GIT_LFS_TESTS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
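# Selection sketch (assumed invocation): once the mark is applied, these cross tests can be run on
# their own with the pytest marker, e.g.
#
#     RUN_PT_TF_CROSS_TESTS=1 pytest -m is_pt_tf_cross_test tests/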
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip("test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test.
Pipeline tests are skipped by default and we can run only them by setting RUN_PIPELINE_TESTS environment variable
to a truthy value and selecting the is_pipeline_test pytest mark.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
if not _run_slow_tests:
return unittest.skip("test is slow")(test_case)
else:
return test_case
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
if not _run_custom_tokenizers:
return unittest.skip("test of custom tokenizers")(test_case)
else:
return test_case
def require_git_lfs(test_case):
"""
Decorator marking a test that requires git-lfs.
git-lfs requires additional dependencies, and tests are skipped by default. Set the RUN_GIT_LFS_TESTS environment
variable to a truthy value to run them.
"""
if not _run_git_lfs_tests:
return unittest.skip("test of git lfs workflow")(test_case)
else:
return test_case
def require_onnx(test_case):
if not is_onnx_available():
return unittest.skip("test requires ONNX")(test_case)
else:
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
else:
return test_case
def require_torch_scatter(test_case):
"""
Decorator marking a test that requires PyTorch scatter.
These tests are skipped when PyTorch scatter isn't installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch scatter")(test_case)
else:
return test_case
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio.
These tests are skipped when torchaudio isn't installed.
"""
if not is_torchaudio_available():
return unittest.skip("test requires torchaudio")(test_case)
else:
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow.
These tests are skipped when TensorFlow isn't installed.
"""
if not is_tf_available():
return unittest.skip("test requires TensorFlow")(test_case)
else:
return test_case
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax
These tests are skipped when one / both are not installed
"""
if not is_flax_available():
test_case = unittest.skip("test requires JAX & Flax")(test_case)
return test_case
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece.
These tests are skipped when SentencePiece isn't installed.
"""
if not is_sentencepiece_available():
return unittest.skip("test requires SentencePiece")(test_case)
else:
return test_case
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers.
These tests are skipped when 🤗 Tokenizers isn't installed.
"""
if not is_tokenizers_available():
return unittest.skip("test requires tokenizers")(test_case)
else:
return test_case
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
if not is_pandas_available():
return unittest.skip("test requires pandas")(test_case)
else:
return test_case
def require_scatter(test_case):
"""
Decorator marking a test that requires PyTorch Scatter. These tests are skipped when PyTorch Scatter isn't
installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch Scatter")(test_case)
else:
return test_case
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch).
These tests are skipped on a machine without multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
else:
return test_case
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() > 1:
return unittest.skip("test requires 0 or 1 GPU")(test_case)
else:
return test_case
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
if not is_torch_tpu_available():
return unittest.skip("test requires PyTorch TPU")
else:
return test_case
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
else:
torch_device = None
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch. """
if torch_device != "cuda":
return unittest.skip("test requires CUDA")(test_case)
else:
return test_case
def require_datasets(test_case):
"""Decorator marking a test that requires datasets."""
if not is_datasets_available():
return unittest.skip("test requires `datasets`")(test_case)
else:
return test_case
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
if not is_faiss_available():
return unittest.skip("test requires `faiss`")(test_case)
else:
return test_case
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
if not is_optuna_available():
return unittest.skip("test requires optuna")(test_case)
else:
return test_case
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
if not is_ray_available():
return unittest.skip("test requires Ray/tune")(test_case)
else:
return test_case
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
if not is_soundfile_availble():
return unittest.skip("test requires soundfile")(test_case)
else:
return test_case
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch or tf is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
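# Usage sketch (the fixture path is a hypothetical example):
#
#     config_path = get_tests_dir("fixtures/dummy_config.json")
#     # -> absolute path under the tests/ dir, regardless of the current working directory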
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
class CaptureStd:
"""
Context manager to capture:
stdout, clean it up and make it available via obj.out; stderr, and make it available via obj.err
init arguments: - out - capture stdout: True/False, default True - err - capture stderr: True/False, default
True
Examples::
with CaptureStdout() as cs:
print("Secret message")
print(f"captured: {cs.out}")
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
print(f"captured: {cs.err}")
# to capture just one of the streams, but not the other
with CaptureStd(err=False) as cs:
print("Secret message")
print(f"captured: {cs.out}")
# but best use the stream-specific subclasses
"""
def __init__(self, out=True, err=True):
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
self.out = apply_print_resets(self.out_buf.getvalue())
if self.err_buf:
sys.stderr = self.err_old
self.err = self.err_buf.getvalue()
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
""" Same as CaptureStd but captures only stdout """
def __init__(self):
super().__init__(err=False)
class CaptureStderr(CaptureStd):
""" Same as CaptureStd but captures only stderr """
def __init__(self):
super().__init__(out=False)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
- logger: 'logging` logger object
Results:
The captured output is available via `self.out`
Example::
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out == msg + "\n"
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
class TestCasePlus(unittest.TestCase):
"""
This class extends `unittest.TestCase` with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
* ``pathlib`` objects (all fully resolved):
- ``test_file_path`` - the current test file path (=``__file__``)
- ``test_file_dir`` - the directory containing the current test file
- ``tests_dir`` - the directory of the ``tests`` test suite
- ``examples_dir`` - the directory of the ``examples`` test suite
- ``repo_root_dir`` - the directory of the repository
- ``src_dir`` - the directory of ``src`` (i.e. where the ``transformers`` sub-dir resides)
* stringified paths---same as above but these return paths as strings, rather than ``pathlib`` objects:
- ``test_file_path_str``
- ``test_file_dir_str``
- ``tests_dir_str``
- ``examples_dir_str``
- ``repo_root_dir_str``
- ``src_dir_str``
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
::
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
``tmp_dir`` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
::
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
This is useful for debugging when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the ``before`` and ``after`` args, leading to the
following behavior:
``before=True``: the temporary dir will always be cleared at the beginning of the test.
``before=False``: if the temporary dir already existed, any existing files will remain there.
``after=True``: the temporary dir will always be deleted at the end of the test.
``after=False``: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of ``rm -r`` safely, only subdirs of the project repository checkout are
allowed if an explicit ``tmp_dir`` is used, so that by mistake no ``/tmp`` or similar important part of the
filesystem will get nuked. i.e. please always pass paths that start with ``./``
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` specific to the current test suite.
This is useful for invoking external programs from the test suite - e.g. distributed training.
::
def test_whatever(self):
env = self.get_env()
"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` correctly, depending on the test suite
it's invoked from. This is useful for invoking external programs from the test suite - e.g. distributed
training.
It always inserts ``./src`` first, then ``./tests`` or ``./examples`` depending on the test suite type and
finally the preset ``PYTHONPATH`` if any (all fully resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (:obj:`string`, `optional`):
if :obj:`None`:
- a unique temporary path will be created
- sets ``before=True`` if ``before`` is :obj:`None`
- sets ``after=True`` if ``after`` is :obj:`None`
else:
- :obj:`tmp_dir` will be created
- sets ``before=True`` if ``before`` is :obj:`None`
- sets ``after=False`` if ``after`` is :obj:`None`
before (:obj:`bool`, `optional`):
If :obj:`True` and the :obj:`tmp_dir` already exists, make sure to empty it right away; if :obj:`False`
and the :obj:`tmp_dir` already exists, any existing files will remain there.
after (:obj:`bool`, `optional`):
If :obj:`True`, delete the :obj:`tmp_dir` at the end of the test; if :obj:`False`, leave the
:obj:`tmp_dir` and its contents intact at the end of the test.
Returns:
tmp_dir(:obj:`string`): either the same value as passed via `tmp_dir` or the path to the auto-selected tmp
dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
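# Usage sketch combining the TestCasePlus features above (script and command are illustrative):
#
#     class ExampleTest(TestCasePlus):
#         def test_run_external_script(self):
#             tmp_dir = self.get_auto_remove_tmp_dir()   # removed automatically in tearDown
#             env = self.get_env()                       # PYTHONPATH covers ./src plus ./tests or ./examples
#             execute_subprocess_async([sys.executable, "-c", "print('ok')"], env=env)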
def mockenv(**kwargs):
"""
this is a convenience wrapper, that allows this ::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
run_slow = os.getenv("RUN_SLOW", False)
use_tf = os.getenv("USE_TF", False)
"""
return unittest.mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the ``os.environ`` dictionary in-place. Similar to mockenv
The ``os.environ`` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
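# Usage sketch (variable names are illustrative):
#
#     with mockenv_context("UNWANTED_VAR", WANTED_VAR="1"):
#         ...  # UNWANTED_VAR is removed and WANTED_VAR == "1" only inside this block
#     # os.environ is restored to its previous state on exit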
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
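# Sketch of the conftest.py wrapper the docstring refers to (exact conftest contents may differ):
#
#     # conftest.py
#     from transformers.testing_utils import pytest_addoption_shared
#
#     def pytest_addoption(parser):
#         pytest_addoption_shared(parser)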
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and while unlikely, it could break should
pytest do internal changes - also it calls default internal methods of terminalreporter which
can be hijacked by various `pytest-` plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = "reports"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{id}_{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
with open(report_files["passes"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
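# Sketch of the matching conftest.py hook (assumed wiring; "--make-reports" is the option registered
# by pytest_addoption_shared above):
#
#     # conftest.py
#     def pytest_terminal_summary(terminalreporter):
#         make_reports = terminalreporter.config.getoption("--make-reports")
#         if make_reports:
#             pytest_terminal_summary_main(terminalreporter, id=make_reports)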
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
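# Usage sketch (test body is illustrative):
#
#     @slow
#     def test_full_model_integration(self):
#         ...  # runs only when RUN_SLOW is set to a truthy value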
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
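# Usage sketch (the model class and checkpoint name are hypothetical placeholders):
#
#     @require_read_token
#     def test_gated_checkpoint(self):
#         model = AutoModel.from_pretrained("some-org/some-gated-model")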
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or does not match the current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when they aren't
installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(test_case)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
    return unittest.skipUnless(
        is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
    )(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
    return unittest.skipUnless(
        is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
    )(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
    Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
    Decorator marking a test that requires soundfile.
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
    return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
    Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
    joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
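# Hedged usage sketch (never called): `get_tests_dir` resolves paths relative to the
# calling test file, so a test can locate fixture data regardless of the invocation
# directory. The fixture file name below is hypothetical.
def _example_get_tests_dir_usage():
    fixtures_dir = get_tests_dir("fixtures")
    return os.path.join(fixtures_dir, "dummy_config.json")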
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be taken, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
    assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
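# Illustrative sketch (never executed) of the \r-collapsing behavior described above:
# everything overwritten by carriage returns is dropped, keeping only the final message.
def _example_apply_print_resets():
    buf = "downloading  10%\rdownloading 100%\rdone\n"
    assert apply_print_resets(buf) == "done\n"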
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
    doing. If this behavior is not desired and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
    Temporarily add the given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
        # `for ... else`: fall through to `None` so the error below can actually trigger
        for up in [1, 2, 3]:
            tmp_dir = path.parents[up]
            if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
                break
        else:
            tmp_dir = None
        if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
                If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
                If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
    this is a convenience wrapper that allows this::

        @mockenv(RUN_SLOW=True, USE_TF=False)
        def test_something():
            run_slow = os.getenv("RUN_SLOW", False)
            use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
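# Hedged usage sketch (never called): temporarily adding and removing environment
# variables with `mockenv_context`; the variable names below are hypothetical.
def _example_mockenv_context_usage():
    with mockenv_context("SOME_FLAG_TO_REMOVE", TRANSFORMERS_OFFLINE="1"):
        assert os.environ["TRANSFORMERS_OFFLINE"] == "1"
        assert "SOME_FLAG_TO_REMOVE" not in os.environ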
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
    This function emulates --durations and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
    NB: this function taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
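# Hedged sketch (illustrative, never executed) of how a repository `conftest.py` could
# wire the two helpers above into pytest hooks, per their docstrings; the hook bodies
# are an assumption rather than a copy of the real conftest.
def _example_conftest_wiring():
    def pytest_addoption(parser):
        pytest_addoption_shared(parser)
    def pytest_terminal_summary(terminalreporter):
        make_reports = terminalreporter.config.getoption("--make-reports")
        if make_reports:
            pytest_terminal_summary_main(terminalreporter, id=make_reports)
    return pytest_addoption, pytest_terminal_summary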
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
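# Hedged usage sketch (never executed): combining the helpers above to launch a
# distributed run from a `TestCasePlus` test; the script path and process count are
# hypothetical.
def _example_distributed_launch(test_case: "TestCasePlus"):
    cmd = shlex.split(
        f"python -m torch.distributed.launch --nproc_per_node=2 "
        f"--master_port={get_torch_dist_unique_port()} examples/dummy_ddp_script.py"
    )
    return execute_subprocess_async(cmd, env=test_case.get_env())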
def nested_simplify(obj, decimals=3):
"""
    Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to allow for simple equality tests
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
        # each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
            left_indent = len(line) - len(line.lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
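# Illustrative sketch (never called): `to_2tuple` duplicates scalars and leaves
# iterables untouched, which is handy for image/kernel size arguments.
def _example_to_2tuple():
    assert to_2tuple(7) == (7, 7)
    assert to_2tuple((224, 224)) == (224, 224)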
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
    if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
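# Hedged usage sketch (defined inside a function so it is never collected by pytest):
# retrying a hypothetical nondeterministic test with `is_flaky`.
def _example_is_flaky_usage():
    class _ExampleFlakyTest(unittest.TestCase):
        @is_flaky(max_attempts=3, wait_before_retry=0.5, description="hypothetical nondeterministic op")
        def test_sometimes_fails(self):
            self.assertTrue(True)
    return _ExampleFlakyTest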
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
    To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
    start_method = "spawn"
    ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
    # the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
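# Hedged sketch (never executed) of a `target_func` compatible with
# `run_test_in_subprocess`: it reads inputs from the input queue and reports results
# or an error back through the output queue, matching the protocol checked above.
def _example_subprocess_target(in_queue, out_queue, timeout):
    error = None
    results = {}
    try:
        inputs = in_queue.get(timeout=timeout)
        results["echo"] = inputs
    except Exception as e:
        error = str(e)
    results["error"] = error
    out_queue.put(results, timeout=timeout)
    out_queue.join()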
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
    its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
    CUDA usage is detected (via a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
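# Illustrative sketch (never called; the dataset name is hypothetical): a markdown code
# block containing a `load_dataset(...)` call comes back with the
# `# doctest: +IGNORE_RESULT` marker appended to that call.
def _example_preprocess_string():
    snippet = '```python\n>>> from datasets import load_dataset\n>>> ds = load_dataset("dummy_dataset")\n```'
    return preprocess_string(snippet, skip_cuda_tests=False)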
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
    Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
    # here instead of at the user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
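# Hedged usage sketch (never called): seeding and clearing caches in a device-agnostic
# way inside a test, using the dispatch helpers above.
def _example_backend_helpers_usage():
    backend_manual_seed(torch_device, 42)
    device_count = backend_device_count(torch_device)
    backend_empty_cache(torch_device)
    return device_count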
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
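# Hedged sketch of a hypothetical `TRANSFORMERS_TEST_DEVICE_SPEC` file (say,
# `npu_device_spec.py`) providing the names consumed above; the `torch.npu` attributes
# are an assumption and depend on a third-party backend being installed:
#
#     import torch
#     import torch_npu  # noqa: F401
#
#     DEVICE_NAME = "npu"
#     MANUAL_SEED_FN = torch.npu.manual_seed
#     EMPTY_CACHE_FN = torch.npu.empty_cache
#     DEVICE_COUNT_FN = torch.npu.device_count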
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env("RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
    Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
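# Hedged illustration (hypothetical test class, not part of the original module): `@slow` is
# applied per test method and composes with the other skip decorators defined below.
#
#   class ExampleGenerationTest(unittest.TestCase):
#       @slow
#       def test_long_generation(self):
#           ...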
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that need to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
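# Hedged illustration (hypothetical names): hardware-requirement decorators are typically
# stacked with `@slow`; each one wraps the test in an additional unittest skip condition.
#
#   class ExampleParallelTest(unittest.TestCase):
#       @slow
#       @require_torch_multi_gpu
#       def test_model_parallel_forward(self):
#           ...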
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(test_case)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
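# Hedged usage sketch: the `torch_device` resolved above can be steered from the shell before
# the test session starts, e.g. (hypothetical invocations):
#
#   TRANSFORMERS_TEST_DEVICE=cpu pytest tests/models
#   TRANSFORMERS_TEST_BACKEND=torch_npu TRANSFORMERS_TEST_DEVICE=npu pytest tests/trainer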
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires Cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if it is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
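# Hedged usage sketch (hypothetical fixture path): resolves the `tests` directory of the
# calling test file, so fixtures can be located no matter which directory pytest runs from.
#
#   fixture_file = get_tests_dir("fixtures/sample_text.txt")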
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: `logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporarily add the given path to `sys.path`.
Usage:
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError("/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper, that allows this ::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
    run_slow = os.getenv("RUN_SLOW", False)
    use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
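# Hedged usage sketch (hypothetical variable names/values): unlike `mockenv`, this context
# manager mutates `os.environ` in place for the duration of the `with` block and restores the
# previous state afterwards.
#
#   with mockenv_context("HF_HOME", TRANSFORMERS_OFFLINE="1"):
#       ...  # code here sees TRANSFORMERS_OFFLINE=1 and no HF_HOME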
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this functions taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
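# Hedged usage sketch (hypothetical command): the per-worker port offset keeps concurrent
# pytest-xdist workers from binding the same rendezvous port.
#
#   cmd = f"torchrun --nproc_per_node=2 --master_port={get_torch_dist_unique_port()} train.py"
#   execute_subprocess_async(shlex.split(cmd), env=self.get_env())  # inside a TestCasePlus test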
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key on its own line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(line) - len(line.lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
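# Hedged usage sketch: to_2tuple(7) returns (7, 7), while an iterable such as (3, 5) is
# returned unchanged.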
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
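# Hedged usage sketch (hypothetical script path): failures surface the child's combined output
# via `SubprocessCallException` instead of a bare `CalledProcessError`.
#
#   stdout = run_command(["python", "examples/dummy_script.py", "--epochs", "1"], return_stdout=True)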
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
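# Hedged usage sketch (hypothetical test): the wrapped test is retried up to `max_attempts`
# times; only the final attempt is allowed to propagate its exception.
#
#   @is_flaky(max_attempts=3, wait_before_retry=1.0, description="sporadic network timeouts")
#   def test_hub_download(self):
#       ...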
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
    start_method = "spawn"
    ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
    # the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
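# Hedged sketch of a `target_func` compatible with `run_test_in_subprocess`
# (hypothetical, not part of the original module): it reads the inputs from the
# input queue and must always put a dict with an "error" key on the output
# queue, since the parent calls `test_case.fail(...)` when "error" is not None.
def _example_subprocess_target(in_queue, out_queue, timeout):
    error = None
    try:
        inputs = in_queue.get(timeout=timeout)
        # ... run the actual (e.g. GPU-heavy) checks on `inputs` here ...
        assert inputs is None or isinstance(inputs, dict)
    except Exception as e:
        error = f"{e}"
    out_queue.put({"error": error}, timeout=timeout)
    out_queue.join()  # wait until the parent has consumed the result (task_done)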
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`.
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
    its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
    CUDA usage is detected (via a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
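# Hedged illustration (not part of the original module) of the two behaviours
# described above: a `load_dataset(...)` line gets the IGNORE_RESULT marker
# appended, and a CUDA-flavoured snippet is dropped when `skip_cuda_tests=True`.
def _example_preprocess_string():
    ds_block = '```python\n>>> from datasets import load_dataset\n>>> ds = load_dataset("dummy")\n```'
    assert "# doctest: +IGNORE_RESULT" in preprocess_string(ds_block, skip_cuda_tests=False)
    cuda_block = '```python\n>>> import torch\n>>> model.to("cuda")\n```'
    assert preprocess_string(cuda_block, skip_cuda_tests=True) == ""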
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
    Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
        Overwrites the `parse` method to incorporate a skip for CUDA tests, and removes logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
    Overwrites the `DoctestModule` of the pytest package to make sure the `HfDocTestParser` is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
    # Some entries in the dispatch table are `None` (no-op for that device), so
    # guard against calling `None` here instead of forcing users to do it.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
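# Hedged usage sketch (not part of the original module; assumes torch is
# available so the mappings above contain real callables): tests pass the
# globally resolved `torch_device` so the same body works on CUDA, on CPU, or on
# a custom backend registered through a device spec file.
def _example_backend_helpers(device: str = "cpu"):
    backend_manual_seed(device, 0)   # dispatches to torch.manual_seed for "cpu"
    backend_empty_cache(device)      # the "cpu" entry is None, i.e. a no-op
    return backend_device_count(device)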
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError("Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
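# Hedged sketch (not part of the original module) of what a device spec file
# pointed to by `TRANSFORMERS_TEST_DEVICE_SPEC` could look like; the file name,
# backend module and function values below are hypothetical. Only `DEVICE_NAME`
# is mandatory; the `*_FN` attributes are optional overrides picked up by
# `update_mapping_from_spec` above:
#
#   # my_device_spec.py
#   import my_backend  # hypothetical torch-compatible backend package
#   DEVICE_NAME = "my_device"
#   MANUAL_SEED_FN = my_backend.manual_seed
#   EMPTY_CACHE_FN = my_backend.empty_cache
#   DEVICE_COUNT_FN = my_backend.device_count
#
# It would then be enabled with something like:
#   TRANSFORMERS_TEST_DEVICE_SPEC=my_device_spec.py TRANSFORMERS_TEST_DEVICE=my_device pytest tests/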
| 2024-09-07 06:06:15.630056: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-09-07 06:06:15.645264: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
2024-09-07 06:06:15.660837: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
2024-09-07 06:06:15.665550: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-09-07 06:06:15.678069: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-09-07 06:06:16.717034: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
ImportError while loading conftest '/workspace/test_repo/conftest.py'.
conftest.py:26: in <module>
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
src/transformers/testing_utils.py:54: in <module>
from .utils import (
E ImportError: cannot import name 'GGUF_MIN_VERSION' from 'transformers.utils' (/workspace/test_repo/src/transformers/utils/__init__.py)
| {'total': 17, 'passed': 6, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 3, 'warning': 8, 'error': 0} | {'total': 0, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | ./test_repo/src/transformers/testing_utils.py | ./test_repo/examples/tensorflow/test_tensorflow_examples.py |
2_transformers_callee__testing_utils__is_staging_test__bf2e0cf70b68e0d46cdf15a4ece1f5c0a03de084 | callee | fail-to-pass | https://github.com/huggingface/transformers | bf2e0cf70b68e0d46cdf15a4ece1f5c0a03de084 | function | is_staging_test | testing_utils.py | def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip("test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
| def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import inspect
import logging
import os
import re
import shutil
import sys
import tempfile
import unittest
from distutils.util import strtobool
from io import StringIO
from pathlib import Path
from typing import Iterator, Union
from .file_utils import (
is_datasets_available,
is_faiss_available,
is_flax_available,
is_onnx_available,
is_pandas_available,
is_scatter_available,
is_sentencepiece_available,
is_soundfile_availble,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_torch_tpu_available,
is_torchaudio_available,
is_vision_available,
)
from .integrations import is_optuna_available, is_ray_available
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
PASS = "__DUMMY_TRANSFORMERS_PASS__"
ENDPOINT_STAGING = "https://moon-staging.huggingface.co"
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
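# Hedged usage sketch (not part of the original file): the module-level switches
# below (`_run_slow_tests`, `_run_staging`, ...) are all derived this way, e.g.
# `RUN_SLOW=yes pytest tests/` enables the slow tests.
def _example_parse_flag_from_env():
    os.environ["MY_FAKE_FLAG"] = "yes"                     # hypothetical variable name
    assert parse_flag_from_env("MY_FAKE_FLAG") == 1        # strtobool returns 1/0
    assert parse_flag_from_env("MY_MISSING_FLAG", default=False) is False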
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=False)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False)
_run_git_lfs_tests = parse_flag_from_env("RUN_GIT_LFS_TESTS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip("test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test.
Pipeline tests are skipped by default and we can run only them by setting RUN_PIPELINE_TESTS environment variable
to a truthy value and selecting the is_pipeline_test pytest mark.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
if not _run_slow_tests:
return unittest.skip("test is slow")(test_case)
else:
return test_case
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
if not _run_custom_tokenizers:
return unittest.skip("test of custom tokenizers")(test_case)
else:
return test_case
def require_git_lfs(test_case):
"""
Decorator marking a test that requires git-lfs.
git-lfs requires additional dependencies, and tests are skipped by default. Set the RUN_GIT_LFS_TESTS environment
variable to a truthy value to run them.
"""
if not _run_git_lfs_tests:
return unittest.skip("test of git lfs workflow")(test_case)
else:
return test_case
def require_onnx(test_case):
if not is_onnx_available():
return unittest.skip("test requires ONNX")(test_case)
else:
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
else:
return test_case
def require_torch_scatter(test_case):
"""
Decorator marking a test that requires PyTorch scatter.
These tests are skipped when PyTorch scatter isn't installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch scatter")(test_case)
else:
return test_case
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
if not is_torchaudio_available():
return unittest.skip("test requires torchaudio")(test_case)
else:
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
if not is_tf_available():
return unittest.skip("test requires TensorFlow")(test_case)
else:
return test_case
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
if not is_flax_available():
test_case = unittest.skip("test requires JAX & Flax")(test_case)
return test_case
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
if not is_sentencepiece_available():
return unittest.skip("test requires SentencePiece")(test_case)
else:
return test_case
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
if not is_tokenizers_available():
return unittest.skip("test requires tokenizers")(test_case)
else:
return test_case
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
if not is_pandas_available():
return unittest.skip("test requires pandas")(test_case)
else:
return test_case
def require_scatter(test_case):
"""
Decorator marking a test that requires PyTorch Scatter. These tests are skipped when PyTorch Scatter isn't
installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch Scatter")(test_case)
else:
return test_case
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
"""
if not is_vision_available():
return unittest.skip("test requires vision")(test_case)
else:
return test_case
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
else:
return test_case
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() > 1:
return unittest.skip("test requires 0 or 1 GPU")(test_case)
else:
return test_case
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
if not is_torch_tpu_available():
return unittest.skip("test requires PyTorch TPU")
else:
return test_case
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch. """
if torch_device != "cuda":
return unittest.skip("test requires CUDA")(test_case)
else:
return test_case
def require_datasets(test_case):
"""Decorator marking a test that requires datasets."""
if not is_datasets_available():
return unittest.skip("test requires `datasets`")(test_case)
else:
return test_case
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
if not is_faiss_available():
return unittest.skip("test requires `faiss`")(test_case)
else:
return test_case
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
if not is_optuna_available():
return unittest.skip("test requires optuna")(test_case)
else:
return test_case
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
if not is_ray_available():
return unittest.skip("test requires Ray/tune")(test_case)
else:
return test_case
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
if not is_soundfile_availble():
return unittest.skip("test requires soundfile")(test_case)
else:
return test_case
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch or tf is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
    assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
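# Hedged illustration (not part of the original file): progress-bar style output
# that keeps rewriting the line with `\r` is reduced to the final message, so
# assertions behave the same with and without `pytest -s`.
def _example_apply_print_resets():
    buf = "foo bar\rtar mar\rfinal message"
    assert apply_print_resets(buf) == "final message"
    assert_screenout(buf, "final")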
class CaptureStd:
"""
Context manager to capture:
- stdout, clean it up and make it available via obj.out
- stderr, and make it available via obj.err
init arguments:
- out - capture stdout: True/False, default True
    - err - capture stderr: True/False, default True
Examples::
with CaptureStdout() as cs:
print("Secret message")
print(f"captured: {cs.out}")
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
print(f"captured: {cs.err}")
# to capture just one of the streams, but not the other
with CaptureStd(err=False) as cs:
print("Secret message")
print(f"captured: {cs.out}")
# but best use the stream-specific subclasses
"""
def __init__(self, out=True, err=True):
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
self.out = apply_print_resets(self.out_buf.getvalue())
if self.err_buf:
sys.stderr = self.err_old
self.err = self.err_buf.getvalue()
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
""" Same as CaptureStd but captures only stdout """
def __init__(self):
super().__init__(err=False)
class CaptureStderr(CaptureStd):
""" Same as CaptureStd but captures only stderr """
def __init__(self):
super().__init__(out=False)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
- logger: 'logging` logger object
Results:
The captured output is available via `self.out`
Example::
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg+"\n"
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage ::
with ExtendSysPath('/path/to/dir'):
mymodule = importlib.import_module('mymodule')
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends `unittest.TestCase` with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
* ``pathlib`` objects (all fully resolved):
- ``test_file_path`` - the current test file path (=``__file__``)
- ``test_file_dir`` - the directory containing the current test file
- ``tests_dir`` - the directory of the ``tests`` test suite
- ``examples_dir`` - the directory of the ``examples`` test suite
- ``repo_root_dir`` - the directory of the repository
- ``src_dir`` - the directory of ``src`` (i.e. where the ``transformers`` sub-dir resides)
* stringified paths---same as above but these return paths as strings, rather than ``pathlib`` objects:
- ``test_file_path_str``
- ``test_file_dir_str``
- ``tests_dir_str``
- ``examples_dir_str``
- ``repo_root_dir_str``
- ``src_dir_str``
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
::
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
``tmp_dir`` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
::
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the ``before`` and ``after`` args, leading to the
following behavior:
``before=True``: the temporary dir will always be cleared at the beginning of the test.
``before=False``: if the temporary dir already existed, any existing files will remain there.
``after=True``: the temporary dir will always be deleted at the end of the test.
``after=False``: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of ``rm -r`` safely, only subdirs of the project repository checkout are
allowed if an explicit ``tmp_dir`` is used, so that by mistake no ``/tmp`` or similar important part of the
filesystem will get nuked. i.e. please always pass paths that start with ``./``
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` specific to the current test suite.
This is useful for invoking external programs from the test suite - e.g. distributed training.
::
def test_whatever(self):
env = self.get_env()
"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` correctly, depending on the test suite
it's invoked from. This is useful for invoking external programs from the test suite - e.g. distributed
training.
It always inserts ``./src`` first, then ``./tests`` or ``./examples`` depending on the test suite type and
finally the preset ``PYTHONPATH`` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (:obj:`string`, `optional`):
if :obj:`None`:
- a unique temporary path will be created
- sets ``before=True`` if ``before`` is :obj:`None`
- sets ``after=True`` if ``after`` is :obj:`None`
else:
- :obj:`tmp_dir` will be created
- sets ``before=True`` if ``before`` is :obj:`None`
- sets ``after=False`` if ``after`` is :obj:`None`
before (:obj:`bool`, `optional`):
                If :obj:`True` and the :obj:`tmp_dir` already exists, make sure to empty it right away; if :obj:`False`
and the :obj:`tmp_dir` already exists, any existing files will remain there.
after (:obj:`bool`, `optional`):
                If :obj:`True`, delete the :obj:`tmp_dir` at the end of the test; if :obj:`False`, leave the
:obj:`tmp_dir` and its contents intact at the end of the test.
Returns:
tmp_dir(:obj:`string`): either the same value as passed via `tmp_dir` or the path to the auto-selected tmp
dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
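# Hedged usage sketch (not part of the original file; class and file names are
# hypothetical): a test deriving from TestCasePlus gets auto-removed tmp dirs
# and a PYTHONPATH-adjusted environment for launching external scripts.
class _ExampleTestCasePlusUsage(TestCasePlus):
    def test_write_to_tmp_dir(self):
        tmp_dir = self.get_auto_remove_tmp_dir()            # removed in tearDown()
        with open(os.path.join(tmp_dir, "dummy.txt"), "w") as f:
            f.write("ok")
        env = self.get_env()                                # PYTHONPATH starts with ./src
        assert "PYTHONPATH" in env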
def mockenv(**kwargs):
"""
this is a convenience wrapper, that allows this ::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
run_slow = os.getenv("RUN_SLOW", False)
use_tf = os.getenv("USE_TF", False)
"""
return unittest.mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the ``os.environ`` dictionary in-place. Similar to mockenv
The ``os.environ`` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
    NB: this function taps into a private _pytest API and while unlikely, it could break should
pytest do internal changes - also it calls default internal methods of terminalreporter which
can be hijacked by various `pytest-` plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = "reports"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{id}_{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
with open(report_files["passes"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
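# Hedged sketch (not part of the original file) of the conftest.py wiring that
# the two helpers above expect, paraphrased from their docstrings; the hook
# names are standard pytest hooks:
#
#   # conftest.py
#   from transformers.testing_utils import pytest_addoption_shared, pytest_terminal_summary_main
#
#   def pytest_addoption(parser):
#       pytest_addoption_shared(parser)
#
#   def pytest_terminal_summary(terminalreporter):
#       make_reports = terminalreporter.config.getoption("--make-reports")
#       if make_reports:
#           pytest_terminal_summary_main(terminalreporter, id=make_reports)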
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
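# Hedged usage sketch (not part of the original file): launching an external
# Python process from a test; `execute_subprocess_async` raises if the command
# fails or produces no output at all, otherwise it returns a `_RunOutput`.
def _example_execute_subprocess_async():
    cmd = [sys.executable, "-c", "print('hello from worker')"]
    result = execute_subprocess_async(cmd, env=os.environ.copy(), quiet=True, echo=False)
    assert result.returncode == 0
    assert "hello from worker" in "\n".join(result.stdout)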
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
from transformers.tokenization_utils import BatchEncoding
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
elif isinstance(obj, (dict, BatchEncoding)):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int)):
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist())
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import inspect
import logging
import os
import re
import shutil
import sys
import tempfile
import unittest
from distutils.util import strtobool
from io import StringIO
from pathlib import Path
from typing import Iterator, Union
from .file_utils import (
is_datasets_available,
is_faiss_available,
is_flax_available,
is_onnx_available,
is_pandas_available,
is_scatter_available,
is_sentencepiece_available,
is_soundfile_availble,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_torch_tpu_available,
is_torchaudio_available,
is_vision_available,
)
from .integrations import is_optuna_available, is_ray_available
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
PASS = "__DUMMY_TRANSFORMERS_PASS__"
ENDPOINT_STAGING = "https://moon-staging.huggingface.co"
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=False)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False)
_run_git_lfs_tests = parse_flag_from_env("RUN_GIT_LFS_TESTS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip("test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test.
Pipeline tests are skipped by default and we can run only them by setting RUN_PIPELINE_TESTS environment variable
to a truthy value and selecting the is_pipeline_test pytest mark.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip("test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
if not _run_slow_tests:
return unittest.skip("test is slow")(test_case)
else:
return test_case
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
if not _run_custom_tokenizers:
return unittest.skip("test of custom tokenizers")(test_case)
else:
return test_case
def require_git_lfs(test_case):
"""
Decorator marking a test that requires git-lfs.
git-lfs requires additional dependencies, and tests are skipped by default. Set the RUN_GIT_LFS_TESTS environment
variable to a truthy value to run them.
"""
if not _run_git_lfs_tests:
return unittest.skip("test of git lfs workflow")(test_case)
else:
return test_case
def require_onnx(test_case):
if not is_onnx_available():
return unittest.skip("test requires ONNX")(test_case)
else:
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
else:
return test_case
def require_torch_scatter(test_case):
"""
Decorator marking a test that requires PyTorch scatter.
These tests are skipped when PyTorch scatter isn't installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch scatter")(test_case)
else:
return test_case
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
if not is_torchaudio_available():
return unittest.skip("test requires torchaudio")(test_case)
else:
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
if not is_tf_available():
return unittest.skip("test requires TensorFlow")(test_case)
else:
return test_case
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
if not is_flax_available():
test_case = unittest.skip("test requires JAX & Flax")(test_case)
return test_case
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
if not is_sentencepiece_available():
return unittest.skip("test requires SentencePiece")(test_case)
else:
return test_case
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
if not is_tokenizers_available():
return unittest.skip("test requires tokenizers")(test_case)
else:
return test_case
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
if not is_pandas_available():
return unittest.skip("test requires pandas")(test_case)
else:
return test_case
def require_scatter(test_case):
"""
Decorator marking a test that requires PyTorch Scatter. These tests are skipped when PyTorch Scatter isn't
installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch Scatter")(test_case)
else:
return test_case
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
"""
if not is_vision_available():
return unittest.skip("test requires vision")(test_case)
else:
return test_case
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
else:
return test_case
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() > 1:
return unittest.skip("test requires 0 or 1 GPU")(test_case)
else:
return test_case
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
if not is_torch_tpu_available():
return unittest.skip("test requires PyTorch TPU")
else:
return test_case
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch. """
if torch_device != "cuda":
return unittest.skip("test requires CUDA")(test_case)
else:
return test_case
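# Illustrative usage sketch (not part of the original module): combining the availability
# decorators above with the module-level `torch_device`. The test class, method name and the
# tiny `torch.nn.Linear` model are hypothetical, chosen only to keep the example short.
#
#     class DummyGpuTest(unittest.TestCase):
#         @require_torch_gpu
#         def test_linear_runs_on_gpu(self):
#             import torch
#             model = torch.nn.Linear(4, 2).to(torch_device)
#             out = model(torch.ones(1, 4, device=torch_device))
#             self.assertEqual(out.shape, (1, 2))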
def require_datasets(test_case):
"""Decorator marking a test that requires datasets."""
if not is_datasets_available():
return unittest.skip("test requires `datasets`")(test_case)
else:
return test_case
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
if not is_faiss_available():
return unittest.skip("test requires `faiss`")(test_case)
else:
return test_case
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
if not is_optuna_available():
return unittest.skip("test requires optuna")(test_case)
else:
return test_case
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
if not is_ray_available():
return unittest.skip("test requires Ray/tune")(test_case)
else:
return test_case
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
if not is_soundfile_availble():
return unittest.skip("test requires soundfile")(test_case)
else:
return test_case
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch or tf is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
        The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
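# Illustrative usage sketch (not part of the original module): resolving a fixture file relative
# to the tests directory, so the test passes no matter which directory pytest was started from.
# The `fixtures/dummy.txt` path is hypothetical.
#
#     def test_reads_fixture(self):
#         fixture_path = get_tests_dir("fixtures/dummy.txt")
#         with open(fixture_path) as f:
#             contents = f.read()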
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
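# Illustrative sketch (not part of the original module): what apply_print_resets does to a buffer
# containing progress-bar style carriage returns - only the final message survives.
#
#     buf = "step 1\rstep 2\rfinal message"
#     assert apply_print_resets(buf) == "final message"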
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout, clean it up and make it available via obj.out
- stderr, and make it available via obj.err
init arguments:
- out - capture stdout: True/False, default True
    - err - capture stderr: True/False, default True
Examples::
with CaptureStdout() as cs:
print("Secret message")
print(f"captured: {cs.out}")
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
print(f"captured: {cs.err}")
# to capture just one of the streams, but not the other
with CaptureStd(err=False) as cs:
print("Secret message")
print(f"captured: {cs.out}")
# but best use the stream-specific subclasses
"""
def __init__(self, out=True, err=True):
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
self.out = apply_print_resets(self.out_buf.getvalue())
if self.err_buf:
sys.stderr = self.err_old
self.err = self.err_buf.getvalue()
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
""" Same as CaptureStd but captures only stdout """
def __init__(self):
super().__init__(err=False)
class CaptureStderr(CaptureStd):
""" Same as CaptureStd but captures only stderr """
def __init__(self):
super().__init__(out=False)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
- logger: 'logging` logger object
Results:
The captured output is available via `self.out`
Example::
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
    >>> assert cl.out == msg + "\n"
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
# adapted from https://stackoverflow.com/a/64789046/9201239
@contextlib.contextmanager
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
    """
    Temporarily add the given path to `sys.path`.
Usage ::
with ExtendSysPath('/path/to/dir'):
mymodule = importlib.import_module('mymodule')
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends `unittest.TestCase` with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
* ``pathlib`` objects (all fully resolved):
- ``test_file_path`` - the current test file path (=``__file__``)
- ``test_file_dir`` - the directory containing the current test file
- ``tests_dir`` - the directory of the ``tests`` test suite
- ``examples_dir`` - the directory of the ``examples`` test suite
- ``repo_root_dir`` - the directory of the repository
- ``src_dir`` - the directory of ``src`` (i.e. where the ``transformers`` sub-dir resides)
* stringified paths---same as above but these return paths as strings, rather than ``pathlib`` objects:
- ``test_file_path_str``
- ``test_file_dir_str``
- ``tests_dir_str``
- ``examples_dir_str``
- ``repo_root_dir_str``
- ``src_dir_str``
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
::
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
``tmp_dir`` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
::
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the ``before`` and ``after`` args, leading to the
following behavior:
``before=True``: the temporary dir will always be cleared at the beginning of the test.
``before=False``: if the temporary dir already existed, any existing files will remain there.
``after=True``: the temporary dir will always be deleted at the end of the test.
``after=False``: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of ``rm -r`` safely, only subdirs of the project repository checkout are
allowed if an explicit ``tmp_dir`` is used, so that by mistake no ``/tmp`` or similar important part of the
filesystem will get nuked. i.e. please always pass paths that start with ``./``
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` specific to the current test suite.
This is useful for invoking external programs from the test suite - e.g. distributed training.
::
def test_whatever(self):
env = self.get_env()
"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` correctly, depending on the test suite
it's invoked from. This is useful for invoking external programs from the test suite - e.g. distributed
training.
It always inserts ``./src`` first, then ``./tests`` or ``./examples`` depending on the test suite type and
finally the preset ``PYTHONPATH`` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (:obj:`string`, `optional`):
if :obj:`None`:
- a unique temporary path will be created
- sets ``before=True`` if ``before`` is :obj:`None`
- sets ``after=True`` if ``after`` is :obj:`None`
else:
- :obj:`tmp_dir` will be created
- sets ``before=True`` if ``before`` is :obj:`None`
- sets ``after=False`` if ``after`` is :obj:`None`
before (:obj:`bool`, `optional`):
                If :obj:`True` and the :obj:`tmp_dir` already exists, make sure to empty it right away; if :obj:`False`
                and the :obj:`tmp_dir` already exists, any existing files will remain there.
after (:obj:`bool`, `optional`):
                If :obj:`True`, delete the :obj:`tmp_dir` at the end of the test; if :obj:`False`, leave the
                :obj:`tmp_dir` and its contents intact at the end of the test.
Returns:
tmp_dir(:obj:`string`): either the same value as passed via `tmp_dir` or the path to the auto-selected tmp
dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
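# Illustrative usage sketch (not part of the original module): a TestCasePlus-based test that
# writes into an auto-removed tmp dir and launches an external script with the suite-aware
# environment. The script path and its CLI flags are hypothetical.
#
#     class ExternalScriptTest(TestCasePlus):
#         def test_runs_external_script(self):
#             tmp_dir = self.get_auto_remove_tmp_dir()
#             env = self.get_env()  # PYTHONPATH includes ./src and ./tests (or ./examples)
#             cmd = [sys.executable, "examples/dummy_script.py", "--output_dir", tmp_dir]
#             subprocess.run(cmd, env=env, check=True)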
def mockenv(**kwargs):
"""
    this is a convenience wrapper that allows this::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
run_slow = os.getenv("RUN_SLOW", False)
use_tf = os.getenv("USE_TF", False)
"""
return unittest.mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the ``os.environ`` dictionary in-place. Similar to mockenv
The ``os.environ`` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
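# Illustrative sketch (not part of the original module): using mockenv_context to temporarily
# remove one variable and set another; the variable names are hypothetical.
#
#     with mockenv_context("OLD_FLAG", NEW_FLAG="1"):
#         assert os.environ.get("OLD_FLAG") is None
#         assert os.environ["NEW_FLAG"] == "1"
#     # on exit the original values of both variables are restored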
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
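# Illustrative sketch (not part of the original module): the thin wrapper a conftest.py is
# expected to define so the shared --make-reports option gets registered exactly once.
#
#     # conftest.py
#     from transformers.testing_utils import pytest_addoption_shared
#
#     def pytest_addoption(parser):
#         pytest_addoption_shared(parser)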
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
    NB: this function taps into a private _pytest API and while unlikely, it could break should
pytest do internal changes - also it calls default internal methods of terminalreporter which
can be hijacked by various `pytest-` plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = "reports"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{id}_{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
with open(report_files["passes"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
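# Illustrative sketch (not part of the original module): the matching conftest.py hook that
# forwards the terminal reporter here when --make-reports was passed on the command line.
#
#     # conftest.py
#     from transformers.testing_utils import pytest_terminal_summary_main
#
#     def pytest_terminal_summary(terminalreporter):
#         make_reports = terminalreporter.config.getoption("--make-reports")
#         if make_reports:
#             pytest_terminal_summary_main(terminalreporter, id=make_reports)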
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
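# Illustrative usage sketch (not part of the original module): launching a distributed run from
# inside a TestCasePlus subclass and relying on execute_subprocess_async to raise on failure.
# The launcher arguments and script path are hypothetical.
#
#     def test_distributed_script(self):
#         cmd = [
#             sys.executable, "-m", "torch.distributed.run", "--nproc_per_node", "2",
#             "examples/dummy_train.py", "--output_dir", self.get_auto_remove_tmp_dir(),
#         ]
#         result = execute_subprocess_async(cmd, env=self.get_env())
#         assert result.returncode == 0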
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
from transformers.tokenization_utils import BatchEncoding
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
elif isinstance(obj, (dict, BatchEncoding)):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int)):
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist())
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
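# Illustrative sketch (not part of the original module): how nested_simplify normalizes a nested
# structure so it can be compared against plain literals in a test.
#
#     raw = {"score": 0.987654, "labels": ["a", "b"], "logits": [0.12345, 1.98765]}
#     assert nested_simplify(raw) == {"score": 0.988, "labels": ["a", "b"], "logits": [0.123, 1.988]}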
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
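# Illustrative sketch (not part of the original module): how the flag parsing behaves; the
# variable names are hypothetical.
#
#     os.environ["RUN_MY_EXPENSIVE_TESTS"] = "yes"
#     assert parse_flag_from_env("RUN_MY_EXPENSIVE_TESTS", default=False)
#     assert not parse_flag_from_env("SOME_UNSET_FLAG", default=False)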
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that control interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that control interactions between PyTorch and Flax
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
    Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
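# Illustrative sketch (not part of the original module): marking a test as slow and opting into
# slow tests from the command line. The test name and file path are hypothetical.
#
#     @slow
#     def test_full_model_integration(self):
#         ...
#
# and then, to actually run it:
#
#     $ RUN_SLOW=1 pytest -sv tests/test_something.py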
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
    Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
    Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
    These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
    Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
    Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
    Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
    installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
    return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
        test_case
    )
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
    must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
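# Illustrative sketch (not part of the original module): steering the device selection above via
# the environment before invoking pytest. The backend module name and test path are examples,
# not prescriptions.
#
#     $ TRANSFORMERS_TEST_DEVICE=cpu pytest -sv tests/test_modeling_common.py
#     $ TRANSFORMERS_TEST_BACKEND=torch_npu TRANSFORMERS_TEST_DEVICE=npu pytest -sv tests/test_modeling_common.py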
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(
torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(
torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
    return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if it is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
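# A minimal usage sketch (the fixture sub-path below is illustrative, not an actual file in the repo):
# `get_tests_dir` resolves the `tests/` directory from the *calling* test file, so it works no matter
# which directory pytest was started from.
def _example_get_tests_dir():
    # e.g. returns "/abs/path/to/tests/fixtures/dummy.json" when called from a file under tests/
    return get_tests_dir("fixtures/dummy.json")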
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
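# A minimal sketch of what the helpers above do (the progress strings are illustrative): only the text
# after the last "\r" on each line survives, so progress-bar noise does not leak into assertions.
def _example_apply_print_resets():
    buf = "downloading 10%\rdownloading 90%\rdownload complete"
    assert apply_print_resets(buf) == "download complete"
    assert_screenout(buf, "complete")  # case-insensitive substring check on the cleaned output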
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporarily add the given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one-liner (just the code) and returns the maximum CPU memory (max RSS) used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper, that allows this ::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
    run_slow = os.getenv("RUN_SLOW", False)
    use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
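# A minimal usage sketch (variable values are illustrative): since this is `mock.patch.dict` under the
# hood, the environment overrides only exist while the decorated test runs. Note that `os.environ`
# only accepts string values.
@mockenv(RUN_SLOW="1", USE_TF="0")
def _example_mockenv():
    assert os.environ["RUN_SLOW"] == "1"
    assert os.environ["USE_TF"] == "0"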
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
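# A minimal usage sketch (names are illustrative): unlike `mockenv`, this mutates `os.environ` in place,
# so subprocesses spawned inside the `with` block also see the overrides; everything is restored on exit.
def _example_mockenv_context():
    with mockenv_context("SOME_VAR_TO_UNSET", MASTER_ADDR="localhost", MASTER_PORT="29500"):
        assert os.environ["MASTER_ADDR"] == "localhost"
        assert "SOME_VAR_TO_UNSET" not in os.environ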
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this functions taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
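# A minimal usage sketch (the one-liner is illustrative): stdout/stderr are returned as lists of already
# decoded lines, and a non-zero return code or a fully silent command raises RuntimeError.
def _example_execute_subprocess_async():
    cmd = [sys.executable, "-c", "print('hello from the child process')"]
    result = execute_subprocess_async(cmd, env=os.environ.copy(), timeout=60)
    assert any("hello from the child process" in line for line in result.stdout)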
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
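# A minimal usage sketch: on pytest-xdist worker "gw2" this yields 29502, while a plain pytest run gets
# the base port 29500, so concurrent distributed tests don't fight over `--master_port`.
def _example_get_torch_dist_unique_port():
    port = get_torch_dist_unique_port()
    assert port >= 29500
    return port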
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
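# A minimal usage sketch (the one-liner is illustrative): a failing command surfaces its combined
# stdout/stderr via SubprocessCallException instead of a bare CalledProcessError.
def _example_run_command():
    out = run_command([sys.executable, "-c", "print('ok')"], return_stdout=True)
    assert "ok" in out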
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
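# A minimal usage sketch (the counter and failure are illustrative): the first failure is swallowed and
# the test is re-run, up to `max_attempts` tries in total.
def _example_is_flaky():
    attempts = {"n": 0}
    @is_flaky(max_attempts=3, wait_before_retry=0.0, description="illustrative transient failure")
    def sometimes_fails():
        attempts["n"] += 1
        if attempts["n"] < 2:
            raise RuntimeError("transient failure")
    sometimes_fails()
    assert attempts["n"] == 2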
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_method = "spawn"
ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
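# A minimal sketch of a compatible `target_func` (names are illustrative): it runs in the spawned child,
# reads `inputs` from the input queue, and must put a dict with an "error" key on the output queue,
# since the parent fails the test whenever results["error"] is not None.
def _example_subprocess_target(in_queue, out_queue, timeout):
    error = None
    try:
        inputs = in_queue.get(timeout=timeout)
        _ = inputs  # the actual (e.g. GPU-heavy) checks would go here
    except Exception as e:
        error = str(e)
    out_queue.put({"error": error}, timeout=timeout)
    out_queue.join()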
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA-related code is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
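# A minimal sketch of the two behaviors above (the code blocks are illustrative): `load_dataset` lines
# get an IGNORE_RESULT marker appended, and a block touching CUDA is dropped entirely when
# `skip_cuda_tests` is True.
def _example_preprocess_string():
    snippet = '```python\n>>> import datasets\n>>> ds = load_dataset("dummy")\n```'
    assert "# doctest: +IGNORE_RESULT" in preprocess_string(snippet, skip_cuda_tests=False)
    cuda_snippet = '```python\n>>> import torch\n>>> model.to("cuda")\n```'
    assert preprocess_string(cuda_snippet, skip_cuda_tests=True) == ""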
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# instead at user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
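# A minimal usage sketch: the `backend_*` wrappers look the device name up in the mappings above and
# fall back to the "default" entry, so a test can seed / empty cache / count devices without branching
# on the backend itself. "cpu" is used here purely for illustration.
def _example_device_agnostic_helpers():
    if not is_torch_available():
        return 0
    backend_manual_seed("cpu", 0)  # dispatches to torch.manual_seed
    backend_empty_cache("cpu")  # mapped to None on cpu, so this is a no-op
    return backend_device_count("cpu")  # 0 on cpu, torch.cuda.device_count() on cuda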
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
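# A minimal usage sketch (the variable names are illustrative): truthy strings like "yes"/"1" are
# accepted for flags, and a missing key falls back to the provided default.
def _example_parse_env_helpers():
    os.environ["MY_DUMMY_FLAG"] = "yes"
    try:
        assert parse_flag_from_env("MY_DUMMY_FLAG", default=False)
        assert parse_int_from_env("MY_DUMMY_MISSING_INT", default=2) == 2
    finally:
        del os.environ["MY_DUMMY_FLAG"]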
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env("RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
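# A minimal usage sketch (the test class is illustrative): `slow` composes like any other unittest skip
# decorator, so the test below only runs when RUN_SLOW is set to a truthy value.
class _ExampleSlowTests(unittest.TestCase):
    @slow
    def test_runs_only_with_run_slow(self):
        self.assertTrue(True)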
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
test_case
)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if it is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
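# Illustrative sketch, not part of the original module: resolving a fixture path relative
# to the `tests` directory so the test works from any working directory. The fixture file
# name below is a hypothetical placeholder.
def _example_get_tests_dir_usage():
    fixture_path = get_tests_dir("fixtures/example_fixture.txt")
    return os.path.isfile(fixture_path)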
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
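# Illustrative sketch, not part of the original module: `apply_print_resets` keeps only
# the text that would still be visible after carriage-return rewrites, which is what a
# test should assert on when progress bars overwrite themselves.
def _example_apply_print_resets():
    buf = "foo bar\r tar mar\rfinal message"
    assert apply_print_resets(buf) == "final message"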
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError("/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
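# Illustrative sketch, not part of the original module: a minimal TestCasePlus subclass
# combining the auto-removed temporary dir and the PYTHONPATH-aware environment copy.
class _ExampleTestCasePlusUsage(TestCasePlus):
    def test_write_into_tmp_dir(self):
        tmp_dir = self.get_auto_remove_tmp_dir()  # removed automatically in tearDown
        with open(os.path.join(tmp_dir, "dummy.json"), "w") as f:
            f.write("{}")
        env = self.get_env()  # os.environ copy with ./src and ./tests (or ./examples) prepended
        self.assertIn("PYTHONPATH", env)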
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows this::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
    run_slow = os.getenv("RUN_SLOW", False)
    use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
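# Illustrative sketch, not part of the original module: inside the block the requested
# variables are added/removed in `os.environ` itself; everything is restored on exit.
def _example_mockenv_context_usage():
    with mockenv_context("USE_TF", RUN_SLOW="1"):
        assert os.environ.get("RUN_SLOW") == "1"
        assert "USE_TF" not in os.environ
    # on exit, RUN_SLOW and USE_TF are back to whatever they were before the block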
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this functions taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it times out on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
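# Illustrative sketch, not part of the original module: the two hooks above are meant to
# be wired from a `conftest.py` roughly as follows; the report id ("tests") is an
# arbitrary caller-side choice and the import path is given as an assumption.
#
# # conftest.py
# from transformers.testing_utils import pytest_addoption_shared, pytest_terminal_summary_main
#
# def pytest_addoption(parser):
#     pytest_addoption_shared(parser)
#
# def pytest_terminal_summary(terminalreporter):
#     make_reports = terminalreporter.config.getoption("--make-reports")
#     if make_reports:
#         pytest_terminal_summary_main(terminalreporter, id=make_reports)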
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
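# Illustrative sketch, not part of the original module: interpolating the unique port into
# a distributed launcher command so concurrent pytest-xdist workers don't collide. The
# script name is a hypothetical placeholder.
def _example_build_distributed_cmd(nproc=2):
    master_port = get_torch_dist_unique_port()
    return shlex.split(
        f"python -m torch.distributed.run --nproc_per_node={nproc} --master_port={master_port} example_script.py"
    )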
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`.
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
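# Illustrative sketch, not part of the original module: `run_command` either returns the
# captured stdout or raises SubprocessCallException with the subprocess output attached.
def _example_run_command_usage():
    stdout = run_command([sys.executable, "-c", "print('ok')"], return_stdout=True)
    assert stdout.strip() == "ok"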
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
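# Illustrative sketch, not part of the original module: a test decorated with `is_flaky`
# is retried up to `max_attempts` times. The test body is a hypothetical stand-in for a
# non-deterministic check (e.g. timing- or sampling-dependent).
class _ExampleFlakyTest(unittest.TestCase):
    @is_flaky(max_attempts=3, wait_before_retry=0.5, description="toy non-deterministic check")
    def test_mostly_passes(self):
        import random

        self.assertLess(random.random(), 0.999)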
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_method = "spawn"
ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
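# Illustrative sketch, not part of the original module: a target function compatible with
# `run_test_in_subprocess` reads its inputs from the input queue, reports any exception via
# the "error" key, and puts the result dict on the output queue for the parent to consume.
def _example_subprocess_target(in_queue, out_queue, timeout):
    import traceback

    error = None
    try:
        inputs = in_queue.get(timeout=timeout)  # whatever the parent test passed as `inputs`
        assert inputs is None or isinstance(inputs, dict)
    except Exception:
        error = traceback.format_exc()
    out_queue.put({"error": error}, timeout=timeout)
    out_queue.join()  # wait until the parent has called task_done() on the result
# a test would then call, e.g.: run_test_in_subprocess(self, _example_subprocess_target, inputs={"seed": 0})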
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA-related code is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# instead at user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
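# Hedged usage sketch for the wrappers above (assumes `torch_device` was resolved
# earlier, e.g. to "cuda" or "cpu"; CPU no-ops are guarded by the None check in
# _device_agnostic_dispatch):
#
#     backend_manual_seed(torch_device, 42)
#     backend_empty_cache(torch_device)
#     n_devices = backend_device_count(torch_device)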
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError("Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
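# Hedged sketch of a third-party device spec file consumed by the loader above
# (pointed to by TRANSFORMERS_TEST_DEVICE_SPEC); everything except DEVICE_NAME and
# the *_FN attribute names is illustrative:
#
#     # my_device_spec.py
#     import my_backend_package as my_backend  # hypothetical torch backend package
#
#     DEVICE_NAME = "mydevice"
#     MANUAL_SEED_FN = my_backend.manual_seed
#     EMPTY_CACHE_FN = my_backend.empty_cache
#     DEVICE_COUNT_FN = my_backend.device_count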
| 2024-09-07 06:09:35.483577: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-09-07 06:09:35.498176: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
2024-09-07 06:09:35.513855: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
2024-09-07 06:09:35.518569: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-09-07 06:09:35.531047: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-09-07 06:09:36.599978: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
ImportError while loading conftest '/workspace/test_repo/conftest.py'.
conftest.py:26: in <module>
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
src/transformers/testing_utils.py:54: in <module>
from .utils import (
E ImportError: cannot import name 'GGUF_MIN_VERSION' from 'transformers.utils' (/workspace/test_repo/src/transformers/utils/__init__.py)
| {'total': 17, 'passed': 6, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 3, 'warning': 8, 'error': 0} | {'total': 0, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | ./test_repo/src/transformers/testing_utils.py | ./test_repo/examples/tensorflow/test_tensorflow_examples.py |
2_transformers_callee__testing_utils__is_pipeline_test__3a40cdf58d8b7233e561d1bf9e76f2ed02d9c4b7 | callee | fail-to-pass | https://github.com/huggingface/transformers | 3a40cdf58d8b7233e561d1bf9e76f2ed02d9c4b7 | function | is_pipeline_test | testing_utils.py | def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test.
Pipeline tests are skipped by default and we can run only them by setting the RUN_PIPELINE_TESTS environment variable
to a truthy value and selecting the is_pipeline_test pytest mark.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
| def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
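# Hedged usage sketch for the decorator above: applied to a test case it either
# skips (when RUN_PIPELINE_TESTS is falsy) or tags the test with the
# `is_pipeline_test` pytest mark, so pipeline-only runs can select it, e.g.:
#
#     @is_pipeline_test
#     class ExamplePipelineTest(unittest.TestCase):
#         def test_small_model(self):
#             ...
#
#     # then: RUN_PIPELINE_TESTS=yes pytest -m is_pipeline_test tests/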
| import inspect
import logging
import os
import re
import shutil
import sys
import tempfile
import unittest
from distutils.util import strtobool
from io import StringIO
from pathlib import Path
from .file_utils import (
_datasets_available,
_faiss_available,
_flax_available,
_sentencepiece_available,
_tf_available,
_tokenizers_available,
_torch_available,
_torch_tpu_available,
)
from .integrations import _has_optuna, _has_ray
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError("If set, {} must be yes or no.".format(key))
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError("If set, {} must be a int.".format(key))
return _value
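# Hedged illustration of the two env helpers above; strtobool maps "yes"/"true"/"1"
# to 1 and "no"/"false"/"0" to 0, so the parsed flag ends up truthy or falsy:
#
#     os.environ["RUN_SLOW"] = "yes"
#     assert parse_flag_from_env("RUN_SLOW", default=False)             # truthy (1)
#     os.environ["TF_GPU_MEMORY_LIMIT"] = "4096"
#     assert parse_int_from_env("TF_GPU_MEMORY_LIMIT") == 4096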
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that control interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not _torch_available or not _tf_available:
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable
to a truthy value to run them.
"""
if not _run_slow_tests:
return unittest.skip("test is slow")(test_case)
else:
return test_case
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped
by default. Set the RUN_CUSTOM_TOKENIZERS environment variable
to a truthy value to run them.
"""
if not _run_custom_tokenizers:
return unittest.skip("test of custom tokenizers")(test_case)
else:
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not _torch_available:
return unittest.skip("test requires PyTorch")(test_case)
else:
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow.
These tests are skipped when TensorFlow isn't installed.
"""
if not _tf_available:
return unittest.skip("test requires TensorFlow")(test_case)
else:
return test_case
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax
These tests are skipped when one / both are not installed
"""
if not _flax_available:
test_case = unittest.skip("test requires JAX & Flax")(test_case)
return test_case
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece.
These tests are skipped when SentencePiece isn't installed.
"""
if not _sentencepiece_available:
return unittest.skip("test requires SentencePiece")(test_case)
else:
return test_case
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers.
These tests are skipped when 🤗 Tokenizers isn't installed.
"""
if not _tokenizers_available:
return unittest.skip("test requires tokenizers")(test_case)
else:
return test_case
def require_torch_multigpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch).
These tests are skipped on a machine without multiple GPUs.
To run *only* the multigpu tests, assuming all test names contain multigpu:
$ pytest -sv ./tests -k "multigpu"
"""
if not _torch_available:
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
else:
return test_case
def require_torch_non_multigpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not _torch_available:
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() > 1:
return unittest.skip("test requires 0 or 1 GPU")(test_case)
else:
return test_case
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
if not _torch_tpu_available:
return unittest.skip("test requires PyTorch TPU")
else:
return test_case
if _torch_available:
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
else:
torch_device = None
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch. """
if torch_device != "cuda":
return unittest.skip("test requires CUDA")(test_case)
else:
return test_case
def require_datasets(test_case):
"""Decorator marking a test that requires datasets."""
if not _datasets_available:
return unittest.skip("test requires `datasets`")(test_case)
else:
return test_case
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
if not _faiss_available:
return unittest.skip("test requires `faiss`")(test_case)
else:
return test_case
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
if not _has_optuna:
return unittest.skip("test requires optuna")(test_case)
else:
return test_case
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
if not _has_ray:
return unittest.skip("test requires Ray/tune")(test_case)
else:
return test_case
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere.
Optionally `append_path` is joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be applied, since under pytest -s the captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
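# Hedged example of the \r cleanup above: everything up to the last carriage return
# on each line is dropped, keeping only the final message.
assert apply_print_resets("downloading  10%\rdownloading 100%\rdone") == "done"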
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
class CaptureStd:
"""Context manager to capture:
stdout, clean it up and make it available via obj.out
stderr, and make it available via obj.err
init arguments:
- out - capture stdout: True/False, default True
- err - capture stderr: True/False, default True
Examples:
with CaptureStdout() as cs:
print("Secret message")
print(f"captured: {cs.out}")
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
print(f"captured: {cs.err}")
# to capture just one of the streams, but not the other
with CaptureStd(err=False) as cs:
print("Secret message")
print(f"captured: {cs.out}")
# but best use the stream-specific subclasses
"""
def __init__(self, out=True, err=True):
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
self.out = apply_print_resets(self.out_buf.getvalue())
if self.err_buf:
sys.stderr = self.err_old
self.err = self.err_buf.getvalue()
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
""" Same as CaptureStd but captures only stdout """
def __init__(self):
super().__init__(err=False)
class CaptureStderr(CaptureStd):
""" Same as CaptureStd but captures only stderr """
def __init__(self):
super().__init__(out=False)
class CaptureLogger:
"""Context manager to capture `logging` streams
Args:
- logger: `logging` logger object
Results:
The captured output is available via `self.out`
Example:
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg+"\n"
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
class TestCasePlus(unittest.TestCase):
"""This class extends `unittest.TestCase` with additional features.
Feature 1: Flexible auto-removable temp dirs which are guaranteed to get
removed at the end of test.
In all the following scenarios the temp dir will be auto-removed at the end
of test, unless `after=False`.
# 1. create a unique temp dir, `tmp_dir` will contain the path to the created temp dir
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
# 2. create a temp dir of my choice and delete it at the end - useful for debug when you want to
# monitor a specific directory
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir(tmp_dir="./tmp/run/test")
# 3. create a temp dir of my choice and do not delete it at the end - useful for when you want
# to look at the temp results
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir(tmp_dir="./tmp/run/test", after=False)
# 4. create a temp dir of my choice and ensure to delete it right away - useful for when you
# disabled deletion in the previous test run and want to make sure that the tmp dir is empty
# before the new test is run
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir(tmp_dir="./tmp/run/test", before=True)
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the
project repository checkout are allowed if an explicit `tmp_dir` is used, so
that by mistake no `/tmp` or similar important part of the filesystem will
get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temp dirs and they all will get
auto-removed, unless requested otherwise.
"""
def setUp(self):
self.teardown_tmp_dirs = []
def get_auto_remove_tmp_dir(self, tmp_dir=None, after=True, before=False):
"""
Args:
tmp_dir (:obj:`string`, `optional`):
use this path, if None a unique path will be assigned
before (:obj:`bool`, `optional`, defaults to :obj:`False`):
if `True` and tmp dir already exists make sure to empty it right away
after (:obj:`bool`, `optional`, defaults to :obj:`True`):
delete the tmp dir at the end of the test
Returns:
tmp_dir(:obj:`string`):
either the same value as passed via `tmp_dir` or the path to the auto-created tmp dir
"""
if tmp_dir is not None:
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def tearDown(self):
# remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
def mockenv(**kwargs):
"""this is a convenience wrapper, that allows this:
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
run_slow = os.getenv("RUN_SLOW", False)
use_tf = os.getenv("USE_TF", False)
"""
return unittest.mock.patch.dict(os.environ, kwargs)
| import inspect
import logging
import os
import re
import shutil
import sys
import tempfile
import unittest
from distutils.util import strtobool
from io import StringIO
from pathlib import Path
from .file_utils import (
_datasets_available,
_faiss_available,
_flax_available,
_sentencepiece_available,
_tf_available,
_tokenizers_available,
_torch_available,
_torch_tpu_available,
)
from .integrations import _has_optuna, _has_ray
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError("If set, {} must be yes or no.".format(key))
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError("If set, {} must be a int.".format(key))
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that control interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not _torch_available or not _tf_available:
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test.
Pipeline tests are skipped by default and we can run only them by setting the RUN_PIPELINE_TESTS environment variable
to a truthy value and selecting the is_pipeline_test pytest mark.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable
to a truthy value to run them.
"""
if not _run_slow_tests:
return unittest.skip("test is slow")(test_case)
else:
return test_case
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped
by default. Set the RUN_CUSTOM_TOKENIZERS environment variable
to a truthy value to run them.
"""
if not _run_custom_tokenizers:
return unittest.skip("test of custom tokenizers")(test_case)
else:
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not _torch_available:
return unittest.skip("test requires PyTorch")(test_case)
else:
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow.
These tests are skipped when TensorFlow isn't installed.
"""
if not _tf_available:
return unittest.skip("test requires TensorFlow")(test_case)
else:
return test_case
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax
These tests are skipped when one / both are not installed
"""
if not _flax_available:
test_case = unittest.skip("test requires JAX & Flax")(test_case)
return test_case
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece.
These tests are skipped when SentencePiece isn't installed.
"""
if not _sentencepiece_available:
return unittest.skip("test requires SentencePiece")(test_case)
else:
return test_case
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers.
These tests are skipped when 🤗 Tokenizers isn't installed.
"""
if not _tokenizers_available:
return unittest.skip("test requires tokenizers")(test_case)
else:
return test_case
def require_torch_multigpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch).
These tests are skipped on a machine without multiple GPUs.
To run *only* the multigpu tests, assuming all test names contain multigpu:
$ pytest -sv ./tests -k "multigpu"
"""
if not _torch_available:
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
else:
return test_case
def require_torch_non_multigpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not _torch_available:
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() > 1:
return unittest.skip("test requires 0 or 1 GPU")(test_case)
else:
return test_case
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
if not _torch_tpu_available:
return unittest.skip("test requires PyTorch TPU")
else:
return test_case
if _torch_available:
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
else:
torch_device = None
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch. """
if torch_device != "cuda":
return unittest.skip("test requires CUDA")(test_case)
else:
return test_case
def require_datasets(test_case):
"""Decorator marking a test that requires datasets."""
if not _datasets_available:
return unittest.skip("test requires `datasets`")(test_case)
else:
return test_case
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
if not _faiss_available:
return unittest.skip("test requires `faiss`")(test_case)
else:
return test_case
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
if not _has_optuna:
return unittest.skip("test requires optuna")(test_case)
else:
return test_case
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
if not _has_ray:
return unittest.skip("test requires Ray/tune")(test_case)
else:
return test_case
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere.
Optionally `append_path` is joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be applied, since under pytest -s the captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
class CaptureStd:
"""Context manager to capture:
stdout, clean it up and make it available via obj.out
stderr, and make it available via obj.err
init arguments:
- out - capture stdout: True/False, default True
- err - capture stderr: True/False, default True
Examples:
with CaptureStdout() as cs:
print("Secret message")
print(f"captured: {cs.out}")
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
print(f"captured: {cs.err}")
# to capture just one of the streams, but not the other
with CaptureStd(err=False) as cs:
print("Secret message")
print(f"captured: {cs.out}")
# but best use the stream-specific subclasses
"""
def __init__(self, out=True, err=True):
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
self.out = apply_print_resets(self.out_buf.getvalue())
if self.err_buf:
sys.stderr = self.err_old
self.err = self.err_buf.getvalue()
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
""" Same as CaptureStd but captures only stdout """
def __init__(self):
super().__init__(err=False)
class CaptureStderr(CaptureStd):
""" Same as CaptureStd but captures only stderr """
def __init__(self):
super().__init__(out=False)
class CaptureLogger:
"""Context manager to capture `logging` streams
Args:
- logger: `logging` logger object
Results:
The captured output is available via `self.out`
Example:
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg+"\n"
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
class TestCasePlus(unittest.TestCase):
"""This class extends `unittest.TestCase` with additional features.
Feature 1: Flexible auto-removable temp dirs which are guaranteed to get
removed at the end of test.
In all the following scenarios the temp dir will be auto-removed at the end
of test, unless `after=False`.
# 1. create a unique temp dir, `tmp_dir` will contain the path to the created temp dir
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
# 2. create a temp dir of my choice and delete it at the end - useful for debug when you want to
# monitor a specific directory
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir(tmp_dir="./tmp/run/test")
# 3. create a temp dir of my choice and do not delete it at the end - useful for when you want
# to look at the temp results
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir(tmp_dir="./tmp/run/test", after=False)
# 4. create a temp dir of my choice and ensure to delete it right away - useful for when you
# disabled deletion in the previous test run and want to make sure that the tmp dir is empty
# before the new test is run
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir(tmp_dir="./tmp/run/test", before=True)
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the
project repository checkout are allowed if an explicit `tmp_dir` is used, so
that by mistake no `/tmp` or similar important part of the filesystem will
get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temp dirs and they all will get
auto-removed, unless requested otherwise.
"""
def setUp(self):
self.teardown_tmp_dirs = []
def get_auto_remove_tmp_dir(self, tmp_dir=None, after=True, before=False):
"""
Args:
tmp_dir (:obj:`string`, `optional`):
use this path, if None a unique path will be assigned
before (:obj:`bool`, `optional`, defaults to :obj:`False`):
if `True` and tmp dir already exists make sure to empty it right away
after (:obj:`bool`, `optional`, defaults to :obj:`True`):
delete the tmp dir at the end of the test
Returns:
tmp_dir(:obj:`string`):
either the same value as passed via `tmp_dir` or the path to the auto-created tmp dir
"""
if tmp_dir is not None:
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def tearDown(self):
# remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
def mockenv(**kwargs):
"""this is a convenience wrapper, that allows this:
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
run_slow = os.getenv("RUN_SLOW", False)
use_tf = os.getenv("USE_TF", False)
"""
return unittest.mock.patch.dict(os.environ, kwargs)
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that control interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that control interactions between PyTorch and Flax
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_TOOL_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
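# Hedged usage sketch for the version-gated decorator above; functools.partial is one
# way to pin a per-test floor (the version string is illustrative):
#
#     @functools.partial(require_accelerate, min_version="0.28.0")
#     def test_needs_recent_accelerate(self):
#         ...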
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
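# Hedged usage sketch for the token-loading decorator above (the checkpoint id is
# illustrative; HF_HUB_READ_TOKEN must be set for the patching branch to kick in):
#
#     @require_read_token
#     def test_gated_checkpoint(self):
#         model = AutoModel.from_pretrained("some-org/some-gated-model")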
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when they aren't
installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
    return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
        test_case
    )
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
    must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
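# Illustrative sketch (not part of the module): once `torch_device` is resolved above,
# device-agnostic tests simply move models and tensors onto it. The helper below is a
# hypothetical example and is never called by the library.
def _example_forward_on_test_device(model, batch):
    if torch_device is None:
        raise RuntimeError("PyTorch is not available in this environment")
    model = model.to(torch_device)
    batch = {k: v.to(torch_device) for k, v in batch.items()}
    return model(**batch)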
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
    return unittest.skipUnless(
        is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
    )(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
    return unittest.skipUnless(
        is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
    )(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
    Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
    return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
    Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if it is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
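# Hedged usage sketch: resolving a fixture shipped with the test suite from inside a test
# file ("fixtures/dummy.txt" is a hypothetical relative path used only for illustration).
def _example_fixture_path():
    # note: `get_tests_dir` inspects its caller's __file__, so real usage is from a test module
    return get_tests_dir("fixtures/dummy.txt")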
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
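# Minimal illustration of the \r-reset cleanup described above (example strings only):
# progress-bar style output collapses to the final message, so the captured buffer looks
# the same with and without `pytest -s`.
def _example_apply_print_resets():
    assert apply_print_resets("step 1\rstep 2\rfinal message") == "final message"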
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
    assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
    doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
    This is a context manager to temporarily change the logging level of transformers modules to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
    Temporarily add the given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
                If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
                `tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
                If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
        Runs the passed python one-liner (just the code) and returns the maximum CPU memory (max RSS) used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
    this is a convenience wrapper, that allows this::

        @mockenv(RUN_SLOW=True, USE_TF=False)
        def test_something():
            run_slow = os.getenv("RUN_SLOW", False)
            use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
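# Hedged usage sketch for `mockenv_context` (hypothetical variable names): the environment
# is patched in-place inside the `with` block and fully restored afterwards, including
# removal of keys that did not exist before.
def _example_mockenv_context_usage():
    with mockenv_context("TOKENIZERS_PARALLELISM", WANDB_DISABLED="true"):
        assert os.environ.get("TOKENIZERS_PARALLELISM") is None
        assert os.environ["WANDB_DISABLED"] == "true"
    # here os.environ is back to its original state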
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
    NB: this function taps into a private _pytest API and, while unlikely, it could break should pytest make internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
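# Hedged sketch (hypothetical script path): building a distributed launch command that can
# be fed to `execute_subprocess_async`, with a collision-free master port under pytest-xdist.
def _example_distributed_launch_cmd(num_gpus=2):
    return shlex.split(
        f"python -m torch.distributed.run --nproc_per_node={num_gpus} "
        f"--master_port={get_torch_dist_unique_port()} examples/run_dummy.py"
    )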
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
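# Minimal illustration of `nested_simplify` (plain Python inputs only): floats are rounded
# to `decimals` and containers are handled recursively, which keeps equality checks simple.
def _example_nested_simplify():
    assert nested_simplify({"score": 0.987654, "ids": [1, 2]}, decimals=3) == {"score": 0.988, "ids": [1, 2]}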
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
            # each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
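# Minimal illustration of `to_2tuple` (illustrative values only): scalars are duplicated,
# iterables are returned unchanged.
def _example_to_2tuple():
    assert to_2tuple(7) == (7, 7)
    assert to_2tuple((3, 5)) == (3, 5)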
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
    if an error occurred while running `command`.
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
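# Hedged usage sketch for `run_command`: run a short subprocess and capture its stdout
# (the one-liner below is arbitrary and used only for illustration).
def _example_run_command():
    return run_command([sys.executable, "-c", "print('hello')"], return_stdout=True)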
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
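# Hedged usage sketch for `is_flaky` (the decorated function below is hypothetical and
# trivially passes; real usage wraps a nondeterministic test method):
@is_flaky(max_attempts=3, wait_before_retry=0.5, description="illustrative only")
def _example_flaky_check():
    assert True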
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
    To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
    start_method = "spawn"
    ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
    # the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
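# Hedged sketch of the `target_func` protocol expected by `run_test_in_subprocess`
# (a hypothetical function; the queue-based contract below is inferred from the code above):
def _example_subprocess_target(in_queue, out_queue, timeout):
    error = None
    try:
        inputs = in_queue.get(timeout=timeout)  # whatever was passed as `inputs`
        _ = inputs  # ... the actual (e.g. GPU-heavy) checks would run here ...
    except Exception as e:
        error = f"{e}"
    # the parent reads this dict, calls `task_done()` and fails the test if "error" is set
    out_queue.put({"error": error}, timeout=timeout)
    out_queue.join()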
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
    its docstring. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
    CUDA usage is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
    Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
    Overwrites the `DoctestModule` of the pytest package to make sure the `HfDocTestParser` is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
    # Some backends have no implementation for a given function (a `None` entry in the
    # dispatch table); guard against that here instead of at the user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
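# Hedged sketch (assumes torch is available so `torch_device` is a valid string): tests use
# these helpers instead of calling `torch.cuda.*` directly, which keeps them device-agnostic.
def _example_device_agnostic_setup(seed=42):
    backend_manual_seed(torch_device, seed)
    backend_empty_cache(torch_device)
    return backend_device_count(torch_device)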
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
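# Minimal usage sketch (the switch names below are hypothetical, not part of the library):
# new CI toggles are typically wired up once at import time with the two helpers above.
def _example_wire_up_env_switches():
    # RUN_MY_FEATURE_TESTS=yes -> True; unset -> False
    run_my_feature_tests = parse_flag_from_env("RUN_MY_FEATURE_TESTS", default=False)
    # MY_FEATURE_NUM_WORKERS=4 -> 4; unset -> None
    my_feature_num_workers = parse_int_from_env("MY_FEATURE_NUM_WORKERS", default=None)
    return run_my_feature_tests, my_feature_num_workers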
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env("RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
    Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
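# Minimal usage sketch (hypothetical test case, not part of the library): `slow` is a plain
# unittest skip decorator, so it stacks with any of the `require_*` decorators defined below.
class _ExampleSlowTest(unittest.TestCase):
    @slow
    def test_something_expensive(self):
        self.assertTrue(True)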
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
    Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
    Decorator marking a test that requires fsdp. These tests are skipped when torch is older than `min_version`.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
    Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
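# Minimal usage sketch (hypothetical test, not part of the library): `require_read_token`
# wraps the test so hub calls resolve gated repositories with the HF_HUB_READ_TOKEN credential.
class _ExampleGatedRepoTest(unittest.TestCase):
    @require_read_token
    def test_can_access_gated_checkpoint(self):
        # Any hub call made here would see the patched read token (when the env var is set).
        self.assertTrue(callable(require_read_token))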
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
    These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
    Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
    Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
    Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
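# Minimal usage sketch (hypothetical test, not part of the library): multi-GPU tests are regular
# test methods guarded by the decorator; select only them with `pytest -k "multi_gpu"`.
class _ExampleMultiGpuTest(unittest.TestCase):
    @require_torch_multi_gpu
    def test_forward_multi_gpu(self):
        import torch

        self.assertGreater(torch.cuda.device_count(), 1)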
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
    return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
        test_case
    )
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
    must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
    Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
    Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
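# Minimal usage sketch (the fixture path is illustrative, not a real file): resolve data under
# the `tests` dir regardless of the directory pytest was launched from.
def _example_fixture_path():
    return os.path.join(get_tests_dir("fixtures"), "some_file.json")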
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be taken, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
    assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
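# Minimal sketch of the behavior above: carriage-return "progress" frames are collapsed so only
# the final message is kept, matching what pytest shows with and without -s.
def _example_apply_print_resets():
    buf = "downloading 10%\rdownloading 90%\rdownload complete"
    assert apply_print_resets(buf) == "download complete"
    assert_screenout(buf, "complete")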
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
    doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
    Temporarily add a given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
                If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
                `tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
                If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its
                contents intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError("/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
    this is a convenience wrapper, that allows this ::

    @mockenv(RUN_SLOW=True, USE_TF=False)
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
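# Minimal usage sketch (the variable names are hypothetical): add/override and remove variables
# for the duration of the block; the original environment is restored on exit.
def _example_mockenv_context():
    with mockenv_context("SOME_VAR_TO_UNSET", MY_FAKE_FLAG="1"):
        assert os.environ.get("MY_FAKE_FLAG") == "1"
        assert "SOME_VAR_TO_UNSET" not in os.environ
    assert "MY_FAKE_FLAG" not in os.environ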
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
    This function emulates the --durations and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
    NB: this function taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
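# Minimal usage sketch (the command is illustrative): launch an external python process from a
# TestCasePlus-based test, using the PYTHONPATH assembled by get_env().
def _example_execute_subprocess(test_case):
    # `test_case` is assumed to be a TestCasePlus instance.
    cmd = [sys.executable, "-c", "print('hello from the subprocess')"]
    result = execute_subprocess_async(cmd, env=test_case.get_env())
    return result.stdout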
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
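# Minimal usage sketch (the script name is hypothetical): build a launcher command whose
# --master_port stays unique per pytest-xdist worker so concurrent runs don't collide.
def _example_distributed_launch_cmd(num_gpus=2):
    return (
        f"{sys.executable} -m torch.distributed.run --nproc_per_node={num_gpus} "
        f"--master_port={get_torch_dist_unique_port()} my_training_script.py --do_train"
    ).split()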
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
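# A short sketch of what `nested_simplify` produces; floats are rounded to
# `decimals` and (assuming torch is available) tensors become rounded lists:
#
#     nested_simplify({"scores": [0.1234567, 0.9876543]})
#     # -> {"scores": [0.123, 0.988]}
#     nested_simplify(torch.tensor([0.1234567, 2.0]))
#     # -> [0.123, 2.0]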
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
            # each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
            for line in lines[1:-1]:
                left_indent = len(line) - len(line.lstrip())
                assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
    Runs `command` with `subprocess.check_output` and optionally returns the `stdout`. Also properly captures any
    error that occurred while running `command`.
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
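# A minimal sketch of `run_command`; a failing command raises
# `SubprocessCallException` carrying the captured output:
#
#     out = run_command(["python", "--version"], return_stdout=True)
#     assert out.startswith("Python")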
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
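# A usage sketch for `is_flaky`, assuming a test method that occasionally fails
# for an external reason (the description below is purely illustrative):
#
#     @is_flaky(max_attempts=3, wait_before_retry=1.0, description="hub requests can time out")
#     def test_push_to_hub(self):
#         ...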
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
    To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
    start_method = "spawn"
    ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
process.start()
    # Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
    # the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
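# A hedged sketch of the `target_func` contract implied above: it reads its
# inputs from the input queue, puts a dict with an "error" key (None on
# success) on the output queue, and lets the parent consume it:
#
#     import traceback
#
#     def _target(in_queue, out_queue, timeout):
#         error = None
#         try:
#             inputs = in_queue.get(timeout=timeout)
#             ...  # the actual assertions on `inputs` go here
#         except Exception:
#             error = traceback.format_exc()
#         out_queue.put({"error": error}, timeout=timeout)
#         out_queue.join()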
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
    The argument `string` is the whole file content if it is a `.md` file. For a python file, it is one of
    its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
    CUDA-related code is detected (with a heuristic), this method returns an empty string so no doctest will be run for
    `string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
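# A small sketch of the intended effect of `preprocess_string`, assuming a
# markdown-style code block where the `load_dataset` call is not on the very
# first prompt line:
#
#     text = '```python\n>>> x = 1\n>>> ds = load_dataset("dummy")\n>>> x\n1\n```\n'
#     processed = preprocess_string(text, skip_cuda_tests=False)
#     # the `load_dataset` line should now end with "# doctest: +IGNORE_RESULT"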
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
    Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
    # Some device-agnostic functions return values. Guard against `None` here
    # rather than forcing every caller to do it.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
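# A device-agnostic usage sketch for the helpers above, assuming `torch_device`
# has already been resolved (see further below in this module):
#
#     backend_manual_seed(torch_device, 0)   # seeds cuda or cpu as appropriate
#     n_devices = backend_device_count(torch_device)
#     backend_empty_cache(torch_device)      # maps to None (a no-op) on cpu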
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError("Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
| 2024-09-07 06:13:29.408602: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-09-07 06:13:29.423556: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
2024-09-07 06:13:29.439295: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
2024-09-07 06:13:29.444036: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-09-07 06:13:29.456529: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-09-07 06:13:30.515949: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
ImportError while loading conftest '/workspace/test_repo/conftest.py'.
conftest.py:26: in <module>
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
src/transformers/testing_utils.py:54: in <module>
from .utils import (
E ImportError: cannot import name 'GGUF_MIN_VERSION' from 'transformers.utils' (/workspace/test_repo/src/transformers/utils/__init__.py)
| {'total': 17, 'passed': 6, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 3, 'warning': 8, 'error': 0} | {'total': 0, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | ./test_repo/src/transformers/testing_utils.py | ./test_repo/examples/tensorflow/test_tensorflow_examples.py |
2_transformers_callee__testing_utils__is_agent_test__0ba15cedbcbe9a9db2d5cde76ea4bb712c311934 | callee | fail-to-pass | https://github.com/huggingface/transformers | 0ba15cedbcbe9a9db2d5cde76ea4bb712c311934 | function | is_agent_test | testing_utils.py | def is_agent_test(test_case):
"""
    Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip("test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
| def is_agent_test(test_case):
"""
    Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
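# A small sketch of how the flag parser behaves; values go through `strtobool`,
# so "yes"/"true"/"1" are accepted as truthy:
#
#     os.environ["RUN_SLOW"] = "yes"
#     parse_flag_from_env("RUN_SLOW", default=False)    # -> 1 (truthy)
#     parse_flag_from_env("UNSET_FLAG", default=False)  # -> False (the default)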
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip("test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip("test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
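# A usage sketch for the `@slow` marker; the decorated test only runs when
# RUN_SLOW is set to a truthy value (e.g. `RUN_SLOW=1 pytest tests/...`):
#
#     @slow
#     def test_full_model_integration(self):
#         ...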
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
    Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
return _inner
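# A usage sketch for `require_read_token`, assuming HF_HUB_READ_TOKEN is set in
# the environment and the checkpoint name below is a hypothetical gated repo:
#
#     @require_read_token
#     def test_gated_model(self):
#         model = AutoModel.from_pretrained("some-org/some-gated-model")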
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
    These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
    Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
    Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
    Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
    return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
        test_case
    )
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip("test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU and IPEX.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires IPEX and an XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup with IPEX and at least one XPU device. These tests are
skipped on a machine without IPEX or multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip("test requires IPEX and at least one XPU device")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(
torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(
torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
    Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip("test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
    return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
    Decorator marking a test that requires Cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
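# A small sketch of `get_tests_dir`, assuming it is called from a file living
# somewhere under the repository's `tests/` directory:
#
#     fixtures_dir = get_tests_dir("fixtures")
#     # -> "<repo_root>/tests/fixtures", regardless of where pytest was invoked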
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
    doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests we often need to know where things are relative to the current test file, which is not trivial since the
test could be invoked from more than one directory or could reside in sub-directories at different depths. This
class solves this problem by sorting out all the basic paths and providing easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of your choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that no `/tmp` or similar important part of the filesystem gets
nuked by mistake, i.e. please always pass paths that start with `./`.
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type, and finally
the preset `PYTHONPATH`, if any (all fully resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away. If `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test. If `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir (`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one-liner (just the code) and returns how much max CPU memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
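# A minimal sketch of how the features above are typically used (hypothetical test class,
# not referenced anywhere else in this module):
class _ExampleTestCasePlusUsage(TestCasePlus):
    def test_tmp_dir_and_env(self):
        tmp_dir = self.get_auto_remove_tmp_dir()  # removed automatically in tearDown()
        with open(os.path.join(tmp_dir, "dummy.txt"), "w") as f:
            f.write("hello")
        env = self.get_env()  # e.g. to launch an external program with the right PYTHONPATH
        self.assertIn("PYTHONPATH", env)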
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows this::

    @mockenv(RUN_SLOW=True, USE_TF=False)
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
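# Illustrative usage of `mockenv_context` (a minimal sketch; the variable names are
# arbitrary examples). All changes are reverted when the `with` block exits:
def _example_mockenv_context():
    with mockenv_context("UNWANTED_VAR", WANTED_VAR="1"):
        assert os.environ.get("WANTED_VAR") == "1"
        assert "UNWANTED_VAR" not in os.environ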
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates the --durations and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this functions taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
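# A sketch of how the two hooks above are typically wired up from a `conftest.py`
# (the `--make-reports` value doubles as the report id prefix; the exact conftest
# contents may differ in your setup):
#
#     def pytest_addoption(parser):
#         pytest_addoption_shared(parser)
#
#     def pytest_terminal_summary(terminalreporter):
#         make_reports = terminalreporter.config.getoption("--make-reports")
#         if make_reports:
#             pytest_terminal_summary_main(terminalreporter, id=make_reports)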
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
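# A minimal sketch of launching a distributed run from a test; the script path and argv
# below are hypothetical, and `env` would typically come from `TestCasePlus.get_env()`:
def _example_distributed_launch(env=None):
    cmd = f"""
        python -m torch.distributed.run
        --nproc_per_node=2
        --master_port={get_torch_dist_unique_port()}
        ./examples/pytorch/dummy_script.py
    """.split()
    execute_subprocess_async(cmd, env=env)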
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
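# Illustrative usage of `is_flaky` (a hypothetical test body; the assertion stands in for
# a check that only fails intermittently):
@is_flaky(max_attempts=3, wait_before_retry=0.5, description="example: external service can be slow")
def _example_flaky_check():
    import time

    assert time.time() > 0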
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_method = "spawn"
ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
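# A sketch of the `target_func` protocol expected by `run_test_in_subprocess`: read the
# inputs from the input queue, report back a dict with an "error" key, then join the
# output queue. The actual checks below are a placeholder:
def _example_target_func(in_queue, out_queue, timeout):
    import traceback

    error = None
    try:
        inputs = in_queue.get(timeout=timeout)
        assert inputs is None or isinstance(inputs, dict)  # placeholder for real checks
    except Exception:
        error = traceback.format_exc()
    out_queue.put({"error": error}, timeout=timeout)
    out_queue.join()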
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstring. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA-related code is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving CUDA are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device-agnostic functions return values. Guard against `None` here
# instead of at user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
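# Illustrative device-agnostic pattern (a sketch): a test can seed, clear caches and count
# devices without branching on the backend, e.g.
#
#     backend_manual_seed(torch_device, 0)
#     backend_empty_cache(torch_device)
#     n_devices = backend_device_count(torch_device)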
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
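# A hypothetical device spec file (say `my_device_spec.py`, pointed to by
# `TRANSFORMERS_TEST_DEVICE_SPEC`) could look like the sketch below; the constant names are
# the ones queried above, everything else (the backend module, its functions) is illustrative:
#
#     import my_torch_backend as backend
#
#     DEVICE_NAME = "my_device"
#     MANUAL_SEED_FN = backend.manual_seed
#     EMPTY_CACHE_FN = backend.empty_cache
#     DEVICE_COUNT_FN = backend.device_count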
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting the RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting the RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip("test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip("test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip("test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
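# Illustrative combination of these markers on a single test (a hypothetical example; the
# decorators compose, so the test only runs when RUN_SLOW is truthy and PyTorch is installed):
@slow
@require_torch
def _example_marked_check():
    assert True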
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when they aren't
installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
    test_case
)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip("test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU and IPEX.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires IPEX and an XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup with IPEX and at least one XPU device. These tests are
skipped on a machine without IPEX or multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip("test requires IPEX and at least one XPU device")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
    return unittest.skipUnless(
        is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
    )(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
    return unittest.skipUnless(
        is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
    )(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip("test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
    return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
    Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
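# Illustrative usage sketch (comment only): `get_tests_dir` resolves the `tests` directory relative
# to the *calling* test file, so fixture paths stay valid no matter which working directory pytest
# was launched from. The "fixtures/dummy.json" path below is a hypothetical example.
#
#     fixtures_path = get_tests_dir("fixtures/dummy.json")
#     with open(fixtures_path) as f:
#         data = f.read()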
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
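# Worked example (comment only): `apply_print_resets` drops every carriage-return-overwritten
# prefix so that only the final rendering of each line survives, which keeps output assertions
# stable with and without `pytest -s`, e.g.
#
#     apply_print_resets("downloading 10%\rdownloading 50%\rdone")  # -> "done"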
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this is a not wanted behavior and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
    This is useful for debugging when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
                If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
                `tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
                If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its
                contents intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
                a python one-liner (code only) that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
    this is a convenience wrapper that allows this::

        @mockenv(RUN_SLOW=True, USE_TF=False)
        def test_something():
            run_slow = os.getenv("RUN_SLOW", False)
            use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
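# Illustrative sketch (comment only): unlike `mockenv`, which is typically used as a decorator,
# `mockenv_context` patches `os.environ` in place for the duration of a `with` block and restores
# it afterwards, which also affects code that captured `os.environ` by reference. The variable
# names below are hypothetical.
#
#     with mockenv_context("TRANSFORMERS_VERBOSITY", WANDB_DISABLED="true"):
#         ...  # TRANSFORMERS_VERBOSITY is removed, WANDB_DISABLED is set to "true"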
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
    NB: this function taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
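# Illustrative usage sketch (comment only): `execute_subprocess_async` is typically called from a
# `TestCasePlus` test, with `self.get_env()` extending PYTHONPATH for the child process. It raises
# RuntimeError on a non-zero return code or when the subprocess produced no output at all.
#
#     cmd = ["python", "-c", "print('hello from a worker')"]
#     execute_subprocess_async(cmd, env=self.get_env())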
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
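# Illustrative sketch (comment only): combining the two helpers above so concurrent `pytest-xdist`
# workers don't race for the same rendezvous port. The script path is hypothetical.
#
#     master_port = get_torch_dist_unique_port()
#     cmd = (
#         f"python -m torch.distributed.launch --nproc_per_node=2 --master_port={master_port} "
#         "./tests/dummy_dist_test.py"
#     ).split()
#     execute_subprocess_async(cmd, env=self.get_env())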
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
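# Worked example (comment only): `nested_simplify` recurses through containers, rounds floats to
# `decimals`, and converts tensors/arrays to plain Python values so the result can be compared
# with `==` in assertions, e.g.
#
#     nested_simplify({"scores": [0.123456, 0.987654]}, decimals=3)
#     # -> {"scores": [0.123, 0.988]}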
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
            # each key on its own line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
                left_indent = len(line) - len(line.lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
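# Worked example (comment only): `to_2tuple` normalizes scalar-or-pair arguments such as image or
# patch sizes, e.g. to_2tuple(224) -> (224, 224), while to_2tuple((224, 196)) is returned unchanged.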
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
    if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
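# Illustrative usage sketch (comment only): `run_command` is mostly used to exercise example/CLI
# scripts and to surface their stderr in the test failure message.
#
#     out = run_command(["python", "--version"], return_stdout=True)
#     assert out.startswith("Python")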
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
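# Illustrative usage sketch (comment only): `is_flaky` re-runs a failing test up to `max_attempts`
# times; the final attempt runs outside the retry loop so its exception propagates normally. The
# test name and description below are hypothetical.
#
#     @is_flaky(max_attempts=3, wait_before_retry=1.0, description="intermittent NCCL timeout")
#     def test_something_nondeterministic(self):
#         ...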
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
    start_method = "spawn"
    ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
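# Illustrative sketch (comment only) of the contract `run_test_in_subprocess` expects from
# `target_func`: it receives the input queue, the output queue and the timeout, reads its inputs
# from the former and must put a dict with an "error" key (None on success) on the latter. The
# function name is hypothetical and `traceback` would need to be imported.
#
#     def _target_func(in_queue, out_queue, timeout):
#         error = None
#         try:
#             inputs = in_queue.get(timeout=timeout)
#             ...  # actual assertions go here
#         except Exception:
#             error = traceback.format_exc()
#         out_queue.put({"error": error}, timeout=timeout)
#         out_queue.join()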
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
    its docstring. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
    CUDA-related code is detected (via a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
    Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# instead at user level.
if fn is None:
return None
return fn(*args, **kwargs)
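# Illustrative sketch (comment only): the dispatch tables defined below map a device string to a
# backend-specific callable, with a "default" fallback; a known device whose entry is `None`
# short-circuits to `None` instead of calling anything.
#
#     _device_agnostic_dispatch("cuda", BACKEND_DEVICE_COUNT)  # -> torch.cuda.device_count()
#     _device_agnostic_dispatch("cpu", BACKEND_DEVICE_COUNT)   # -> 0
#     _device_agnostic_dispatch("mps", BACKEND_DEVICE_COUNT)   # not in the table -> "default" entry -> 1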
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
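# Illustrative usage sketch (comment only): the module-level flags defined below are read once at
# import time, so they are toggled from the shell, e.g.
#
#     RUN_SLOW=yes RUN_PT_TF_CROSS_TESTS=no python -m pytest tests/
#
# With that invocation, parse_flag_from_env("RUN_SLOW", default=False) returns True.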
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
    Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
    Decorator marking a test that requires g2p_en. These tests are skipped when g2p-en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
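# Illustrative usage sketch (an assumption, not part of the original module): wrapping a
# test that loads a gated checkpoint so that, when HF_HUB_READ_TOKEN is set, the token is
# patched in for the duration of the call.
@require_read_token
def _example_gated_checkpoint_test():  # hypothetical example, no real model is loaded
    pass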
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
    These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
    Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
    Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
    Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
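# Illustrative usage sketch (an assumption, not part of the original module): hardware
# decorators compose with the capability decorators above; each skip condition is
# evaluated once, at decoration time.
@require_torch_multi_gpu
@slow
def _example_multi_gpu_test():  # hypothetical example, not a real test
    pass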
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
    return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
        test_case
    )
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
    must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(
torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(
torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
    Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
    Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
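# Illustrative sketch (an assumption, not part of the original module): an ad-hoc skip
# decorator built on top of get_gpu_count(), mirroring the require_* helpers above
# without importing torch/tf/jax directly at the call site.
def _example_require_any_gpu(test_case):  # hypothetical helper
    return unittest.skipUnless(get_gpu_count() > 0, "example: test requires at least one GPU")(test_case)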
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
    joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this is a not wanted behavior and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
    This is useful for debugging when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
                If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
                `tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
                If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its
                contents intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper, that allows this ::
    @mockenv(RUN_SLOW=True, USE_TF=False)
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
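# Illustrative usage sketch (an assumption, not part of the original module): forcing a
# hypothetical environment variable for a block of code; os.environ is restored on exit,
# even if the block raises.
def _example_mockenv_context():  # hypothetical demo
    with mockenv_context(EXAMPLE_FLAG="1"):
        return os.environ.get("EXAMPLE_FLAG")  # "1" inside the context only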
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this functions taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
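# Illustrative usage sketch (an assumption, not part of the original module): feeding the
# per-worker port into a launcher command so concurrent pytest-xdist workers do not
# collide on --master_port.
def _example_master_port_arg():  # hypothetical helper
    return f"--master_port={get_torch_dist_unique_port()}"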
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
        # each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
    if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
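# Illustrative usage sketch (an assumption, not part of the original module): running a
# quick Python one-liner and capturing its stdout; a non-zero exit code raises
# SubprocessCallException with the combined output.
def _example_run_command():  # hypothetical demo
    return run_command([sys.executable, "-c", "print('ok')"], return_stdout=True)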
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
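# Illustrative usage sketch (an assumption, not part of the original module): retrying a
# test that fails intermittently, e.g. due to nondeterministic sampling; the final
# attempt's exception propagates normally.
@is_flaky(max_attempts=3, wait_before_retry=0.5, description="hypothetical nondeterministic sampling")
def _example_flaky_test():  # hypothetical example, not a real test
    pass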
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_methohd = "spawn"
ctx = multiprocessing.get_context(start_methohd)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
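# Illustrative sketch (not part of the original module) of the contract `target_func` is
# expected to follow: read the inputs from the input queue, run the checks, and report a dict
# with an "error" key through the output queue (assumes `traceback` is imported; names are
# hypothetical).
#
#     def _target_func(in_queue, out_queue, timeout):
#         error = None
#         try:
#             inputs = in_queue.get(timeout=timeout)
#             assert inputs["x"] + 1 == inputs["expected"]
#         except Exception:
#             error = traceback.format_exc()
#         out_queue.put({"error": error}, timeout=timeout)
#         out_queue.join()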
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstring. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
cuda-specific code is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
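# Illustrative sketch (not part of the original module): a docstring codeblock such as
#
#     ```python
#     >>> from datasets import load_dataset
#     >>> ds = load_dataset("some/dataset")
#     ```
#
# comes out of `preprocess_string` with the `load_dataset` line annotated with
# `# doctest: +IGNORE_RESULT`; if `skip_cuda_tests` is True and any snippet matches the cuda
# heuristic, the whole string is dropped and an empty string is returned instead.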
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Guard against a `None` entry here so that
# callers do not have to.
if fn is None:
return None
return fn(*args, **kwargs)
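# Illustrative sketch (not part of the original module): dispatching through one of the
# BACKEND_* tables defined below. Unknown devices fall back to the "default" entry, and a
# `None` entry (e.g. empty_cache on cpu) is a no-op.
#
#     _device_agnostic_dispatch("cuda", BACKEND_MANUAL_SEED, 42)  # -> torch.cuda.manual_seed(42)
#     _device_agnostic_dispatch("cpu", BACKEND_EMPTY_CACHE)       # entry is None -> returns None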
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
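# Illustrative sketch (not part of the original module): the wrappers above are meant to be
# called with the active `torch_device`, so the same test code works on cuda, cpu, or a
# third-party backend registered through a device spec file.
#
#     backend_manual_seed(torch_device, 0)
#     if backend_device_count(torch_device) < 2:
#         ...  # e.g. skip a multi-device assertion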
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
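# Illustrative sketch (not part of the original module): a minimal, hypothetical device spec
# file that `TRANSFORMERS_TEST_DEVICE_SPEC` could point to. Only `DEVICE_NAME` is mandatory;
# the *_FN entries are optional as long as a "default" fallback exists in the mapping above.
#
#     # my_device_spec.py (hypothetical)
#     import my_torch_backend  # hypothetical package providing the backend
#     DEVICE_NAME = "my_device"
#     MANUAL_SEED_FN = my_torch_backend.manual_seed
#     EMPTY_CACHE_FN = my_torch_backend.empty_cache
#     DEVICE_COUNT_FN = my_torch_backend.device_count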
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
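# Illustrative sketch (not part of the original module): with `RUN_SLOW=yes` exported,
# `parse_flag_from_env("RUN_SLOW", default=False)` returns a truthy value; with the variable
# unset it falls back to the given default, which is how the `_run_*` flags below are set.
#
#     os.environ["RUN_SLOW"] = "yes"
#     parse_flag_from_env("RUN_SLOW", default=False)  # -> truthy (strtobool result)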
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env("RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
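# Illustrative sketch (not part of the original module): stacking the skip decorators defined
# in this file on a hypothetical test; it only runs when RUN_SLOW is truthy and torch is
# installed.
#
#     @slow
#     @require_torch
#     def test_full_model_forward(self):
#         ...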
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
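# Illustrative sketch (not part of the original module): `require_read_token` is stacked on
# top of the usual skip decorators for tests that load gated checkpoints. The model id below
# is hypothetical.
#
#     @require_read_token
#     @slow
#     def test_gated_model(self):
#         _ = AutoModel.from_pretrained("some-org/some-gated-model")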
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(test_case)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires Cython.
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
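# Illustrative sketch (not part of the original module): resolving a fixture path relative to
# the `tests` directory no matter where pytest is invoked from. The fixture name below is
# hypothetical.
#
#     SAMPLE_VOCAB = get_tests_dir("fixtures/dummy_vocab.json")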
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
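# Illustrative sketch (not part of the original module): how the `\r`-reset cleanup behaves.
#
#     buf = "downloading  10%\rdownloading 100%\rdone\n"
#     apply_print_resets(buf)  # -> "done\n" (only the final message survives)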
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this is a not wanted behavior and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
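As a hedged illustration of the three behaviors described above (the `./xxx`-style paths are only examples):
```python
# Usage sketch inside a TestCasePlus-based test; directory names are illustrative.
from pathlib import Path

from transformers.testing_utils import TestCasePlus


class ExampleTmpDirTest(TestCasePlus):
    def test_tmp_dir_modes(self):
        # 1. unique tmp dir: created empty, auto-removed in tearDown
        auto_dir = self.get_auto_remove_tmp_dir()
        # 2. fixed debug dir: cleared before the test, left intact afterwards
        debug_dir = self.get_auto_remove_tmp_dir("./xxx")
        # 3. explicit overrides: keep pre-existing files, but still delete at the end
        mixed_dir = self.get_auto_remove_tmp_dir("./yyy", before=False, after=True)
        for d in (auto_dir, debug_dir, mixed_dir):
            self.assertTrue(Path(d).is_dir())
```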
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError("/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows this::

    @mockenv(RUN_SLOW=True, USE_TF=False)
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
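A short, hedged sketch of how `mockenv_context` is meant to be used (the variable names are illustrative):
```python
# Temporarily inject RUN_SLOW and remove USE_TF; both are restored when the block exits.
import os

from transformers.testing_utils import mockenv_context

with mockenv_context("USE_TF", RUN_SLOW="1"):
    assert os.environ["RUN_SLOW"] == "1"
    assert "USE_TF" not in os.environ
# outside the context manager the original environment is back in place
```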
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
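A sketch of the `conftest.py` wrapper this docstring refers to; it mirrors how the repository's conftest files are expected to call it, but is written here only as an illustration:
```python
# conftest.py (sketch)
from transformers.testing_utils import pytest_addoption_shared


def pytest_addoption(parser):
    pytest_addoption_shared(parser)
```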
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and, while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
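And the matching `pytest_terminal_summary` wrapper, again a hedged sketch of the expected `conftest.py` glue (the report id is whatever value was passed to `--make-reports`):
```python
# conftest.py (sketch, continued)
from transformers.testing_utils import pytest_terminal_summary_main


def pytest_terminal_summary(terminalreporter):
    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
```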
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
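A hedged sketch of launching a distributed run from a test with these helpers; `./dummy_script.py` and the 2-process setup are hypothetical:
```python
import sys

from transformers.testing_utils import TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port


class ExampleDistributedTest(TestCasePlus):
    def test_torchrun_launch(self):
        port = get_torch_dist_unique_port()  # avoids port clashes under pytest-xdist
        cmd = (
            f"{sys.executable} -m torch.distributed.run "
            f"--nproc_per_node=2 --master_port={port} ./dummy_script.py"
        ).split()
        # raises RuntimeError on a non-zero return code or if the child produced no output at all
        execute_subprocess_async(cmd, env=self.get_env())
```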
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(line) - len(line.lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
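A brief, hedged example of both the success and the failure path (the commands are throwaway one-liners):
```python
import sys

from transformers.testing_utils import SubprocessCallException, run_command

out = run_command([sys.executable, "-c", "print('ok')"], return_stdout=True)
assert out.strip() == "ok"

try:
    run_command([sys.executable, "-c", "raise SystemExit(1)"])
except SubprocessCallException as exc:
    print("command failed as expected:", exc)
```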
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
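A usage sketch, with a deliberately random assertion standing in for a genuinely flaky check:
```python
import random
import unittest

from transformers.testing_utils import is_flaky


class ExampleFlakyTest(unittest.TestCase):
    @is_flaky(max_attempts=3, wait_before_retry=0.1, description="illustrative flakiness only")
    def test_sometimes_fails(self):
        self.assertGreater(random.random(), 0.2)
```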
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_method = "spawn"
ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
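The `target_func` contract is easiest to see in a sketch: it receives the input queue, the output queue and the timeout, and must put back a dict with an `"error"` key (`None` on success). The helper below is a hedged illustration, not code taken from the test suite:
```python
import traceback
import unittest

from transformers.testing_utils import run_test_in_subprocess


def _target(in_queue, out_queue, timeout):
    error = None
    try:
        inputs = in_queue.get(timeout=timeout)
        assert inputs["x"] + 1 == 2
    except Exception:
        error = traceback.format_exc()
    out_queue.put({"error": error}, timeout=timeout)
    out_queue.join()


class ExampleSubprocessTest(unittest.TestCase):
    def test_in_subprocess(self):
        run_test_in_subprocess(test_case=self, target_func=_target, inputs={"x": 1})
```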
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA-related code is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the `HfDocTestParser` is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# instead at user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
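A hedged sketch of how these dispatch helpers keep test code device-agnostic (`torch_device` is the module-level device string; the sketch assumes a PyTorch install):
```python
from transformers.testing_utils import (
    backend_device_count,
    backend_empty_cache,
    backend_manual_seed,
    torch_device,
)


def reset_device_state(seed: int = 0) -> int:
    backend_manual_seed(torch_device, seed)  # torch.cuda.manual_seed on "cuda", torch.manual_seed otherwise
    backend_empty_cache(torch_device)        # dispatches to a no-op on "cpu"
    return backend_device_count(torch_device)
```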
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError("Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
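Putting the pieces together, a `TRANSFORMERS_TEST_DEVICE_SPEC` file only needs `DEVICE_NAME` plus whichever `*_FN` attributes it wants to override; everything below is an illustrative sketch using the attribute names read above:
```python
# my_device_spec.py (hypothetical): point TRANSFORMERS_TEST_DEVICE_SPEC at this file.
import torch

DEVICE_NAME = "cpu"


def _empty_cache():
    # nothing to free on CPU; a real backend would release its cached memory here
    return None


def _device_count():
    return 1


MANUAL_SEED_FN = torch.manual_seed
EMPTY_CACHE_FN = _empty_cache
DEVICE_COUNT_FN = _device_count
```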
| 2024-09-07 06:16:54.197911: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-09-07 06:16:54.212054: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
2024-09-07 06:16:54.227998: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
2024-09-07 06:16:54.232792: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-09-07 06:16:54.245374: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-09-07 06:16:55.303333: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
ImportError while loading conftest '/workspace/test_repo/conftest.py'.
conftest.py:26: in <module>
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
src/transformers/testing_utils.py:54: in <module>
from .utils import (
E ImportError: cannot import name 'GGUF_MIN_VERSION' from 'transformers.utils' (/workspace/test_repo/src/transformers/utils/__init__.py)
| {'total': 17, 'passed': 6, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 3, 'warning': 8, 'error': 0} | {'total': 0, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | ./test_repo/src/transformers/testing_utils.py | ./test_repo/examples/tensorflow/test_tensorflow_examples.py |
2_transformers_callee__testing_utils__slow__13deb95a405bbd1037ad233c692d7fd1de9d31e3 | callee | fail-to-pass | https://github.com/huggingface/transformers | 13deb95a405bbd1037ad233c692d7fd1de9d31e3 | function | slow | testing_utils.py | def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable
to a truthy value to run them.
"""
if not _run_slow_tests:
test_case = unittest.skip("test is slow")(test_case)
return test_case
| def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
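A hedged usage sketch of the decorator; the test body is a placeholder:
```python
import unittest

from transformers.testing_utils import slow


class ExampleSlowTest(unittest.TestCase):
    @slow
    def test_heavy_model(self):
        self.assertTrue(True)  # a real slow test would download or run a large model here


# runs only when RUN_SLOW is truthy, e.g.:  RUN_SLOW=1 python -m pytest -k heavy_model
```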
| import os
import unittest
from distutils.util import strtobool
from transformers.file_utils import _tf_available, _torch_available
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError("If set, {} must be yes or no.".format(key))
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError("If set, {} must be an int.".format(key))
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped
by default. Set the RUN_CUSTOM_TOKENIZERS environment variable
to a truthy value to run them.
"""
if not _run_custom_tokenizers:
test_case = unittest.skip("test of custom tokenizers")(test_case)
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not _torch_available:
test_case = unittest.skip("test requires PyTorch")(test_case)
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow.
These tests are skipped when TensorFlow isn't installed.
"""
if not _tf_available:
test_case = unittest.skip("test requires TensorFlow")(test_case)
return test_case
def require_multigpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch).
These tests are skipped on a machine without multiple GPUs.
To run *only* the multigpu tests, assuming all test names contain multigpu:
$ pytest -sv ./tests -k "multigpu"
"""
if not _torch_available:
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
return test_case
if _torch_available:
# Set the USE_CUDA environment variable to select a GPU.
torch_device = "cuda" if parse_flag_from_env("USE_CUDA") else "cpu"
else:
torch_device = None
| import os
import unittest
from distutils.util import strtobool
from transformers.file_utils import _tf_available, _torch_available
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError("If set, {} must be yes or no.".format(key))
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError("If set, {} must be an int.".format(key))
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable
to a truthy value to run them.
"""
if not _run_slow_tests:
test_case = unittest.skip("test is slow")(test_case)
return test_case
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped
by default. Set the RUN_CUSTOM_TOKENIZERS environment variable
to a truthy value to run them.
"""
if not _run_custom_tokenizers:
test_case = unittest.skip("test of custom tokenizers")(test_case)
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not _torch_available:
test_case = unittest.skip("test requires PyTorch")(test_case)
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow.
These tests are skipped when TensorFlow isn't installed.
"""
if not _tf_available:
test_case = unittest.skip("test requires TensorFlow")(test_case)
return test_case
def require_multigpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch).
These tests are skipped on a machine without multiple GPUs.
To run *only* the multigpu tests, assuming all test names contain multigpu:
$ pytest -sv ./tests -k "multigpu"
"""
if not _torch_available:
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
return test_case
if _torch_available:
# Set the USE_CUDA environment variable to select a GPU.
torch_device = "cuda" if parse_flag_from_env("USE_CUDA") else "cpu"
else:
torch_device = None
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be an int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch not TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision dependencies aren't
installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
test_case
)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
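# Illustrative note (an assumption, not part of the original module): the resolution above can be
# steered from the shell, e.g.
#
#   TRANSFORMERS_TEST_DEVICE=cpu pytest -sv tests/hypothetical_test.py
#
# forces CPU even on a CUDA machine, and TRANSFORMERS_TEST_BACKEND can name an installed
# out-of-tree backend module to import before the device checks run (the test path above is
# hypothetical).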
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(
torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(
torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this is a not wanted behavior and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debugging when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper, that allows this ::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
    run_slow = os.getenv("RUN_SLOW", False)
    use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
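# Illustrative example (a sketch, not part of the original module): setting one environment
# variable and removing another for the duration of a block; the variable names are hypothetical.
def _example_mockenv_context_usage():
    with mockenv_context("HYPOTHETICAL_VAR_TO_REMOVE", TRANSFORMERS_VERBOSITY="error"):
        # Inside the block the update is visible to anything reading os.environ.
        assert os.environ["TRANSFORMERS_VERBOSITY"] == "error"
        assert "HYPOTHETICAL_VAR_TO_REMOVE" not in os.environ
    # On exit the original environment is restored.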
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
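# Illustrative sketch (an assumption, not part of the original module): the kind of wiring a
# `conftest.py` is expected to contain so that the shared helpers above and below are actually
# invoked by pytest; the real conftest files may differ.
#
#   # conftest.py
#   from transformers.testing_utils import pytest_addoption_shared, pytest_terminal_summary_main
#
#   def pytest_addoption(parser):
#       pytest_addoption_shared(parser)
#
#   def pytest_terminal_summary(terminalreporter):
#       make_reports = terminalreporter.config.getoption("--make-reports")
#       if make_reports:
#           pytest_terminal_summary_main(terminalreporter, id=make_reports)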
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
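# Illustrative example (a sketch, not part of the original module): launching an external script
# with the test-suite PYTHONPATH prepared by `TestCasePlus.get_env()`. The script path is
# hypothetical and `test_case` is assumed to be a `TestCasePlus` instance.
def _example_execute_subprocess_async(test_case):
    cmd = [sys.executable, "examples/hypothetical_training_script.py", "--max_steps", "1"]
    # Raises RuntimeError with the combined stderr if the child process exits with a non-zero code.
    return execute_subprocess_async(cmd, env=test_case.get_env())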
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
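# Illustrative example (a sketch, not part of the original module): composing the two helpers
# above when building a `torch.distributed` launch command, so that concurrent pytest-xdist
# workers (gw0 -> 29500, gw1 -> 29501, ...) do not compete for the same port.
def _example_master_port_args():
    return ["--master_port", str(get_torch_dist_unique_port())]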
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
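# Illustrative example (a sketch, not part of the original module): rounding nested floats so the
# structure can be compared with a plain `==` inside a test.
def _example_nested_simplify():
    simplified = nested_simplify({"scores": [0.123456, 0.654321]}, decimals=3)
    assert simplified == {"scores": [0.123, 0.654]}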
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
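# Illustrative example (a sketch, not part of the original module): running a small Python
# one-liner and capturing its stdout; a failing command raises `SubprocessCallException` carrying
# the child's combined output instead.
def _example_run_command():
    stdout = run_command([sys.executable, "-c", "print('hello')"], return_stdout=True)
    assert stdout.strip() == "hello"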
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
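# Illustrative example (a sketch, not part of the original module): retrying a test that fails
# intermittently. The test class, method, and failure scenario are hypothetical.
class _ExampleFlakyTest(unittest.TestCase):
    @is_flaky(max_attempts=3, wait_before_retry=0.5, description="hypothetical transient network error")
    def test_sometimes_fails(self):
        # A real test body would exercise the occasionally-failing code path here.
        self.assertTrue(True)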
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_method = "spawn"
ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
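# Illustrative sketch (an assumption, not part of the original module): the signature and queue
# protocol a `target_func` is expected to follow. The parent reads a dict with an "error" key from
# the output queue and fails the test when it is not None.
def _example_subprocess_target(in_queue, out_queue, timeout):
    error = None
    try:
        inputs = in_queue.get(timeout=timeout)  # whatever was passed as `inputs` above
        _ = inputs  # ... run the memory-heavy or crash-prone logic on `inputs` here ...
    except Exception as e:
        error = str(e)
    out_queue.put({"error": error}, timeout=timeout)
    out_queue.join()  # wait for the parent to call task_done() before exiting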
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA-related code is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
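# Illustrative example (a sketch, not part of the original module): a hypothetical markdown
# snippet whose `load_dataset` line gets the IGNORE_RESULT marker appended so that unpredictable
# download/progress output does not fail the doctest.
def _example_preprocess_string():
    snippet = "```python\n>>> from datasets import load_dataset\n>>> ds = load_dataset('dummy')\n>>> print(ds)\n```"
    processed = preprocess_string(snippet, skip_cuda_tests=False)
    assert "# doctest: +IGNORE_RESULT" in processed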
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# here instead of at the user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
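# Illustrative example (a sketch, not part of the original module): seeding and querying whatever
# backend `torch_device` resolved to, without branching on the device name in the test itself.
# It assumes PyTorch is available so the dispatch tables contain real callables.
def _example_device_agnostic_seed(seed=42):
    backend_manual_seed(torch_device, seed)
    backend_empty_cache(torch_device)
    return backend_device_count(torch_device)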
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
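# Illustrative sketch (an assumption, not part of the original module): a hypothetical device spec
# file that `TRANSFORMERS_TEST_DEVICE_SPEC` could point to. It must define `DEVICE_NAME` and may
# provide the optional `*_FN` callables picked up by `update_mapping_from_spec` above.
#
#   # hypothetical_device_spec.py
#   import torch
#
#   DEVICE_NAME = "xpu"
#   MANUAL_SEED_FN = torch.xpu.manual_seed
#   EMPTY_CACHE_FN = torch.xpu.empty_cache
#   DEVICE_COUNT_FN = torch.xpu.device_count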
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
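# Illustrative sketch (not part of the original module) of how the two parsers above behave,
# assuming the environment contains RUN_SLOW=yes and TF_GPU_MEMORY_LIMIT=4096:
#
#   parse_flag_from_env("RUN_SLOW", default=False)            # -> truthy (strtobool returns 1)
#   parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)   # unset -> falls back to True
#   parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)   # -> 4096
#   # A value like RUN_SLOW=maybe raises ValueError("If set, RUN_SLOW must be yes or no.")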
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env("RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default; they can be run only by setting the RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
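# Illustrative invocation (assumption: the `is_pt_tf_cross_test` mark is registered in the
# test suite's pytest configuration):
#
#   RUN_PT_TF_CROSS_TESTS=1 python -m pytest -m is_pt_tf_cross_test tests/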
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default; they can be run only by setting the RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
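# Illustrative usage (hypothetical test, not part of the original module):
#
#   class MyModelIntegrationTest(unittest.TestCase):
#       @slow
#       def test_full_generation(self):
#           ...  # only runs when RUN_SLOW is set to a truthy value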
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
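# Illustrative usage (hypothetical test): combine the token patch with a gated checkpoint.
# The repo id below is a placeholder, not a real model.
#
#   @require_read_token
#   def test_gated_model(self):
#       model = AutoModel.from_pretrained("some-org/some-gated-model")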
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision dependencies aren't
installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
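# Illustrative usage (hypothetical test): stack the hardware marker with @slow and select by name.
#
#   @require_torch_multi_gpu
#   @slow
#   def test_model_parallel_multi_gpu(self):
#       ...
#
#   # run only these tests:  RUN_SLOW=1 python -m pytest -sv ./tests -k "multi_gpu"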
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
test_case
)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
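# Illustrative sketch (hypothetical test, not part of the original module): tests should move
# models/tensors to the resolved `torch_device` instead of hard-coding "cuda".
#
#   @require_torch_accelerator
#   def test_forward(self):
#       inputs = torch.ones(2, 3, device=torch_device)
#       outputs = my_model.to(torch_device)(inputs)   # `my_model` is a hypothetical module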
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
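# Illustrative sketch: apply_print_resets drops everything up to the last "\r" on each line,
# which collapses progress-bar style output to its final state.
#
#   apply_print_resets("downloading 10%\rdownloading 90%\rdone\n")   # -> "done\n"
#   assert_screenout("step 1\rFinal Message\n", "final message")     # passes (case-insensitive)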
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this is a not wanted behavior and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debugging when you want to monitor a specific directory and make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
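# Illustrative usage (hypothetical test, mirrors the class docstring above):
#
#   class SaveLoadTest(TestCasePlus):
#       def test_save_pretrained(self):
#           tmp_dir = self.get_auto_remove_tmp_dir()           # unique dir, deleted in tearDown
#           debug_dir = self.get_auto_remove_tmp_dir("./xxx")  # emptied before the test, kept after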
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError("/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows this::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
    run_slow = os.getenv("RUN_SLOW", False)
    use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
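# Illustrative usage: temporarily remove one variable and override another inside a block.
#
#   with mockenv_context("WANDB_DISABLED", TRANSFORMERS_VERBOSITY="error"):
#       ...  # WANDB_DISABLED is unset and TRANSFORMERS_VERBOSITY == "error" here
#   # both variables are restored to their previous state on exit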
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates the --durations and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and, while unlikely, it could break should pytest make internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
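# Illustrative usage sketch (not from the source; the command below is a trivial stand-in for a
# real distributed launch): run a short python command through the async helper and inspect the
# captured streams.
def _example_execute_subprocess_async():
    cmd = [sys.executable, "-c", "print('distributed smoke test')"]
    result = execute_subprocess_async(cmd, quiet=True, echo=False)
    assert "distributed smoke test" in "\n".join(result.stdout)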
def pytest_xdist_worker_id():
"""
Returns an int value of the worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
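# Sketch (assumption: the script path is hypothetical) of how the unique port might be combined
# with a `torch.distributed.launch` command so concurrent pytest-xdist workers don't collide on
# the same master port.
def _example_build_dist_launch_cmd(script="./tests/_dummy_dist_script.py", nproc=2):
    master_port = get_torch_dist_unique_port()
    return [
        sys.executable,
        "-m",
        "torch.distributed.launch",
        f"--nproc_per_node={nproc}",
        f"--master_port={master_port}",
        script,
    ]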
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays, to get a simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
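# Minimal usage sketch (values chosen for illustration): floats are rounded to `decimals` and
# nested containers are normalized, which keeps equality assertions in tests stable.
def _example_nested_simplify():
    simplified = nested_simplify({"pi": 3.14159, "ids": [1, 2]}, decimals=3)
    assert simplified == {"pi": 3.142, "ids": [1, 2]}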
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
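# Usage sketch (the file name is a made-up temporary path): dump a small dict with `indent=2`
# and check that the saved file passes the layout assertions above.
def _example_check_json_format(tmp_path="./_example_config.json"):
    import json

    with open(tmp_path, "w") as f:
        json.dump({"hidden_size": 32, "num_layers": 2}, f, indent=2)
    check_json_file_has_correct_format(tmp_path)
    os.remove(tmp_path)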
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
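# Quick illustration: scalars are duplicated into a pair, while iterables pass through unchanged.
def _example_to_2tuple():
    assert to_2tuple(7) == (7, 7)
    assert to_2tuple((3, 4)) == (3, 4)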
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
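# Illustrative usage (assumption: the interpreter can run `-c` snippets): capture stdout from a
# trivial command and assert on it; a non-zero exit would surface as SubprocessCallException
# with the captured output attached.
def _example_run_command():
    out = run_command([sys.executable, "-c", "print('hello from a subprocess')"], return_stdout=True)
    assert "hello from a subprocess" in out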
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
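# Sketch of how the decorator might be applied (the test body is a placeholder assumption):
# the wrapped test is re-run up to `max_attempts` times, sleeping between attempts.
@is_flaky(max_attempts=3, wait_before_retry=0.1, description="illustrative flaky test")
def _example_flaky_check():
    assert True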
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_method = "spawn"
ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
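# Sketch (an assumption, not a definitive contract) of a target function compatible with
# `run_test_in_subprocess`: read the inputs from the input queue, put a dict carrying an
# "error" key on the output queue, and join it so the parent can finish cleanly.
def _example_subprocess_target(in_queue, out_queue, timeout):
    error = None
    try:
        inputs = in_queue.get(timeout=timeout)  # `None` when no inputs were provided
        _ = inputs  # ... actual testing logic would go here ...
    except Exception as e:
        error = f"{e}"
    out_queue.put({"error": error}, timeout=timeout)
    out_queue.join()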
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstring. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA-related code is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device-agnostic functions return values, and a mapping entry may be `None`; in that case
# `None` is returned here and needs to be guarded against at the user level.
if fn is None:
return None
return fn(*args, **kwargs)
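# Illustrative sketch of the dispatch mechanics with a throwaway table (not one of the
# BACKEND_* mappings defined below): unknown devices fall back to the "default" entry.
def _example_device_dispatch():
    table = {"cuda": lambda: 2, "cpu": lambda: 0, "default": lambda: 1}
    assert _device_agnostic_dispatch("cpu", table) == 0
    assert _device_agnostic_dispatch("some_accelerator", table) == 1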
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
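# Usage sketch (assumption: PyTorch is installed and we are on a CPU-only machine, so the
# values below come from the CPU entries of the mappings above).
def _example_backend_helpers():
    backend_manual_seed("cpu", 42)             # seeds torch on CPU
    assert backend_empty_cache("cpu") is None  # mapped to None for CPU, so nothing to clear
    assert backend_device_count("cpu") == 0    # the CPU entry reports zero devices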
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError("Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
| 2024-09-07 06:20:20.102281: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-09-07 06:20:20.116234: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
2024-09-07 06:20:20.132004: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
2024-09-07 06:20:20.136744: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-09-07 06:20:20.149182: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-09-07 06:20:21.202931: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
ImportError while loading conftest '/workspace/test_repo/conftest.py'.
conftest.py:26: in <module>
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
src/transformers/testing_utils.py:54: in <module>
from .utils import (
E ImportError: cannot import name 'GGUF_MIN_VERSION' from 'transformers.utils' (/workspace/test_repo/src/transformers/utils/__init__.py)
| {'total': 17, 'passed': 6, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 3, 'warning': 8, 'error': 0} | {'total': 0, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | ./test_repo/src/transformers/testing_utils.py | ./test_repo/examples/tensorflow/test_tensorflow_examples.py |
2_transformers_callee__testing_utils__tooslow__58f672e65ca6e2efe8c8ed7abcb5392429309af7 | callee | fail-to-pass | https://github.com/huggingface/transformers | 58f672e65ca6e2efe8c8ed7abcb5392429309af7 | function | tooslow | testing_utils.py | def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
| def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import inspect
import logging
import os
import re
import shutil
import sys
import tempfile
import unittest
from distutils.util import strtobool
from io import StringIO
from pathlib import Path
from .file_utils import (
is_datasets_available,
is_faiss_available,
is_flax_available,
is_onnx_available,
is_pandas_available,
is_scatter_available,
is_sentencepiece_available,
is_soundfile_availble,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_torch_tpu_available,
is_torchaudio_available,
)
from .integrations import is_optuna_available, is_ray_available
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError("If set, {} must be yes or no.".format(key))
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError("If set, {} must be a int.".format(key))
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False)
_run_git_lfs_tests = parse_flag_from_env("RUN_GIT_LFS_TESTS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test.
Pipeline tests are skipped by default and we can run only them by setting RUN_PIPELINE_TESTS environment variable
to a truthy value and selecting the is_pipeline_test pytest mark.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
if not _run_slow_tests:
return unittest.skip("test is slow")(test_case)
else:
return test_case
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
if not _run_custom_tokenizers:
return unittest.skip("test of custom tokenizers")(test_case)
else:
return test_case
def require_git_lfs(test_case):
"""
Decorator marking a test that requires git-lfs.
git-lfs requires additional dependencies, and tests are skipped by default. Set the RUN_GIT_LFS_TESTS environment
variable to a truthy value to run them.
"""
if not _run_git_lfs_tests:
return unittest.skip("test of git lfs workflow")(test_case)
else:
return test_case
def require_onnx(test_case):
if not is_onnx_available():
return unittest.skip("test requires ONNX")(test_case)
else:
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
else:
return test_case
def require_torch_scatter(test_case):
"""
Decorator marking a test that requires PyTorch scatter.
These tests are skipped when PyTorch scatter isn't installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch scatter")(test_case)
else:
return test_case
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio.
These tests are skipped when torchaudio isn't installed.
"""
if not is_torchaudio_available():
return unittest.skip("test requires torchaudio")(test_case)
else:
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow.
These tests are skipped when TensorFlow isn't installed.
"""
if not is_tf_available():
return unittest.skip("test requires TensorFlow")(test_case)
else:
return test_case
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax
These tests are skipped when one / both are not installed
"""
if not is_flax_available():
test_case = unittest.skip("test requires JAX & Flax")(test_case)
return test_case
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece.
These tests are skipped when SentencePiece isn't installed.
"""
if not is_sentencepiece_available():
return unittest.skip("test requires SentencePiece")(test_case)
else:
return test_case
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers.
These tests are skipped when 🤗 Tokenizers isn't installed.
"""
if not is_tokenizers_available():
return unittest.skip("test requires tokenizers")(test_case)
else:
return test_case
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
if not is_pandas_available():
return unittest.skip("test requires pandas")(test_case)
else:
return test_case
def require_scatter(test_case):
"""
Decorator marking a test that requires PyTorch Scatter. These tests are skipped when PyTorch Scatter isn't
installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch Scatter")(test_case)
else:
return test_case
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch).
These tests are skipped on a machine without multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
else:
return test_case
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() > 1:
return unittest.skip("test requires 0 or 1 GPU")(test_case)
else:
return test_case
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
if not is_torch_tpu_available():
return unittest.skip("test requires PyTorch TPU")
else:
return test_case
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
else:
torch_device = None
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch. """
if torch_device != "cuda":
return unittest.skip("test requires CUDA")(test_case)
else:
return test_case
def require_datasets(test_case):
"""Decorator marking a test that requires datasets."""
if not is_datasets_available():
return unittest.skip("test requires `datasets`")(test_case)
else:
return test_case
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
if not is_faiss_available():
return unittest.skip("test requires `faiss`")(test_case)
else:
return test_case
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
if not is_optuna_available():
return unittest.skip("test requires optuna")(test_case)
else:
return test_case
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
if not is_ray_available():
return unittest.skip("test requires Ray/tune")(test_case)
else:
return test_case
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
if not is_soundfile_availble():
return unittest.skip("test requires soundfile")(test_case)
else:
return test_case
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch or tf is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
stdout, clean it up and make it available via obj.out; stderr, and make it available via obj.err
init arguments: - out - capture stdout: True/False, default True - err - capture stderr: True/False, default
True
Examples::
with CaptureStdout() as cs:
print("Secret message")
print(f"captured: {cs.out}")
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
print(f"captured: {cs.err}")
# to capture just one of the streams, but not the other
with CaptureStd(err=False) as cs:
print("Secret message")
print(f"captured: {cs.out}")
# but best use the stream-specific subclasses
"""
def __init__(self, out=True, err=True):
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
self.out = apply_print_resets(self.out_buf.getvalue())
if self.err_buf:
sys.stderr = self.err_old
self.err = self.err_buf.getvalue()
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
""" Same as CaptureStd but captures only stdout """
def __init__(self):
super().__init__(err=False)
class CaptureStderr(CaptureStd):
""" Same as CaptureStd but captures only stderr """
def __init__(self):
super().__init__(out=False)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
- logger: 'logging` logger object
Results:
The captured output is available via `self.out`
Example::
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg+"\n"
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
class TestCasePlus(unittest.TestCase):
"""
This class extends `unittest.TestCase` with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
* ``pathlib`` objects (all fully resolved):
- ``test_file_path`` - the current test file path (=``__file__``)
- ``test_file_dir`` - the directory containing the current test file
- ``tests_dir`` - the directory of the ``tests`` test suite
- ``examples_dir`` - the directory of the ``examples`` test suite
- ``repo_root_dir`` - the directory of the repository
- ``src_dir`` - the directory of ``src`` (i.e. where the ``transformers`` sub-dir resides)
* stringified paths---same as above but these return paths as strings, rather than ``pathlib`` objects:
- ``test_file_path_str``
- ``test_file_dir_str``
- ``tests_dir_str``
- ``examples_dir_str``
- ``repo_root_dir_str``
- ``src_dir_str``
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
::
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
``tmp_dir`` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
::
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the ``before`` and ``after`` args, leading to the
following behavior:
``before=True``: the temporary dir will always be cleared at the beginning of the test.
``before=False``: if the temporary dir already existed, any existing files will remain there.
``after=True``: the temporary dir will always be deleted at the end of the test.
``after=False``: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of ``rm -r`` safely, only subdirs of the project repository checkout are
allowed if an explicit ``tmp_dir`` is used, so that no ``/tmp`` or similar important part of the
filesystem gets nuked by mistake, i.e. please always pass paths that start with ``./``
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` specific to the current test suite.
This is useful for invoking external programs from the test suite - e.g. distributed training.
::
def test_whatever(self):
env = self.get_env()
"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` correctly, depending on the test suite
it's invoked from. This is useful for invoking external programs from the test suite - e.g. distributed
training.
It always inserts ``./src`` first, then ``./tests`` or ``./examples`` depending on the test suite type and
finally the preset ``PYTHONPATH`` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (:obj:`string`, `optional`):
if :obj:`None`:
- a unique temporary path will be created
- sets ``before=True`` if ``before`` is :obj:`None`
- sets ``after=True`` if ``after`` is :obj:`None`
else:
- :obj:`tmp_dir` will be created
- sets ``before=True`` if ``before`` is :obj:`None`
- sets ``after=False`` if ``after`` is :obj:`None`
before (:obj:`bool`, `optional`):
If :obj:`True` and the :obj:`tmp_dir` already exists, make sure to empty it right away; if :obj:`False`
and the :obj:`tmp_dir` already exists, any existing files will remain there.
after (:obj:`bool`, `optional`):
If :obj:`True`, delete the :obj:`tmp_dir` at the end of the test; if :obj:`False`, leave the
:obj:`tmp_dir` and its contents intact at the end of the test.
Returns:
tmp_dir(:obj:`string`): either the same value as passed via `tmp_dir` or the path to the auto-selected tmp
dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows this::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
run_slow = os.getenv("RUN_SLOW", False)
use_tf = os.getenv("USE_TF", False)
"""
return unittest.mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the ``os.environ`` dictionary in-place. Similar to mockenv
The ``os.environ`` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
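# Usage sketch (the variable names are made up for illustration): one variable is removed and
# another added inside the context, and both are restored to their previous state on exit.
def _example_mockenv_context():
    os.environ["EXAMPLE_FLAG_TO_REMOVE"] = "1"
    with mockenv_context("EXAMPLE_FLAG_TO_REMOVE", EXAMPLE_NEW_FLAG="yes"):
        assert "EXAMPLE_FLAG_TO_REMOVE" not in os.environ
        assert os.environ["EXAMPLE_NEW_FLAG"] == "yes"
    assert os.environ["EXAMPLE_FLAG_TO_REMOVE"] == "1"
    assert "EXAMPLE_NEW_FLAG" not in os.environ
    del os.environ["EXAMPLE_FLAG_TO_REMOVE"]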
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and, while unlikely, it could break should
pytest make internal changes - also it calls default internal methods of terminalreporter which
can be hijacked by various `pytest-` plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = "reports"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{id}_{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
with open(report_files["passes"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import inspect
import logging
import os
import re
import shutil
import sys
import tempfile
import unittest
from distutils.util import strtobool
from io import StringIO
from pathlib import Path
from .file_utils import (
is_datasets_available,
is_faiss_available,
is_flax_available,
is_onnx_available,
is_pandas_available,
is_scatter_available,
is_sentencepiece_available,
is_soundfile_availble,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_torch_tpu_available,
is_torchaudio_available,
)
from .integrations import is_optuna_available, is_ray_available
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError("If set, {} must be yes or no.".format(key))
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError("If set, {} must be a int.".format(key))
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False)
_run_git_lfs_tests = parse_flag_from_env("RUN_GIT_LFS_TESTS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that control interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test.
Pipeline tests are skipped by default and we can run only them by setting RUN_PIPELINE_TESTS environment variable
to a truthy value and selecting the is_pipeline_test pytest mark.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
if not _run_slow_tests:
return unittest.skip("test is slow")(test_case)
else:
return test_case
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
if not _run_custom_tokenizers:
return unittest.skip("test of custom tokenizers")(test_case)
else:
return test_case
def require_git_lfs(test_case):
"""
Decorator marking a test that requires git-lfs.
git-lfs requires additional dependencies, and tests are skipped by default. Set the RUN_GIT_LFS_TESTS environment
variable to a truthy value to run them.
"""
if not _run_git_lfs_tests:
return unittest.skip("test of git lfs workflow")(test_case)
else:
return test_case
def require_onnx(test_case):
if not is_onnx_available():
return unittest.skip("test requires ONNX")(test_case)
else:
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
else:
return test_case
def require_torch_scatter(test_case):
"""
Decorator marking a test that requires PyTorch scatter.
These tests are skipped when PyTorch scatter isn't installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch scatter")(test_case)
else:
return test_case
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio.
These tests are skipped when torchaudio isn't installed.
"""
if not is_torchaudio_available():
return unittest.skip("test requires torchaudio")(test_case)
else:
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow.
These tests are skipped when TensorFlow isn't installed.
"""
if not is_tf_available():
return unittest.skip("test requires TensorFlow")(test_case)
else:
return test_case
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax
These tests are skipped when one / both are not installed
"""
if not is_flax_available():
test_case = unittest.skip("test requires JAX & Flax")(test_case)
return test_case
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece.
These tests are skipped when SentencePiece isn't installed.
"""
if not is_sentencepiece_available():
return unittest.skip("test requires SentencePiece")(test_case)
else:
return test_case
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers.
These tests are skipped when 🤗 Tokenizers isn't installed.
"""
if not is_tokenizers_available():
return unittest.skip("test requires tokenizers")(test_case)
else:
return test_case
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
if not is_pandas_available():
return unittest.skip("test requires pandas")(test_case)
else:
return test_case
def require_scatter(test_case):
"""
Decorator marking a test that requires PyTorch Scatter. These tests are skipped when PyTorch Scatter isn't
installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch Scatter")(test_case)
else:
return test_case
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch).
These tests are skipped on a machine without multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
else:
return test_case
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() > 1:
return unittest.skip("test requires 0 or 1 GPU")(test_case)
else:
return test_case
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
if not is_torch_tpu_available():
return unittest.skip("test requires PyTorch TPU")
else:
return test_case
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
else:
torch_device = None
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch. """
if torch_device != "cuda":
return unittest.skip("test requires CUDA")(test_case)
else:
return test_case
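# A minimal usage sketch: the availability decorators above stack directly on test
# methods. The class and test names here are hypothetical and purely illustrative.
class _ExampleGpuTestSketch(unittest.TestCase):
    @require_torch
    @require_torch_gpu
    def test_moves_tensor_to_gpu(self):
        import torch

        # Skipped automatically unless PyTorch is installed and a CUDA device is visible.
        tensor = torch.ones(2, 2).to(torch_device)
        self.assertEqual(tensor.device.type, "cuda")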
def require_datasets(test_case):
"""Decorator marking a test that requires datasets."""
if not is_datasets_available():
return unittest.skip("test requires `datasets`")(test_case)
else:
return test_case
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
if not is_faiss_available():
return unittest.skip("test requires `faiss`")(test_case)
else:
return test_case
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
if not is_optuna_available():
return unittest.skip("test requires optuna")(test_case)
else:
return test_case
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
if not is_ray_available():
return unittest.skip("test requires Ray/tune")(test_case)
else:
return test_case
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
if not is_soundfile_availble():
return unittest.skip("test requires soundfile")(test_case)
else:
return test_case
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch or tf is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
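# A minimal sketch of what the two helpers above do: progress-bar style output that
# rewrites the line with "\r" collapses to its final message, so assertions behave the
# same with and without `pytest -s`. The buffer content is made up for illustration.
def _example_apply_print_resets():
    buf = "downloading 10%\rdownloading 55%\rdownload complete"
    assert apply_print_resets(buf) == "download complete"
    assert_screenout(buf, "complete")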
class CaptureStd:
"""
Context manager to capture:
stdout, clean it up and make it available via obj.out; stderr, and make it available via obj.err
init arguments: - out - capture stdout: True/False, default True - err - capture stderr: True/False, default
True
Examples::
with CaptureStdout() as cs:
print("Secret message")
print(f"captured: {cs.out}")
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
print(f"captured: {cs.err}")
# to capture just one of the streams, but not the other
with CaptureStd(err=False) as cs:
print("Secret message")
print(f"captured: {cs.out}")
# but best use the stream-specific subclasses
"""
def __init__(self, out=True, err=True):
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
self.out = apply_print_resets(self.out_buf.getvalue())
if self.err_buf:
sys.stderr = self.err_old
self.err = self.err_buf.getvalue()
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
""" Same as CaptureStd but captures only stdout """
def __init__(self):
super().__init__(err=False)
class CaptureStderr(CaptureStd):
""" Same as CaptureStd but captures only stderr """
def __init__(self):
super().__init__(out=False)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
- logger: 'logging` logger object
Results:
The captured output is available via `self.out`
Example::
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out == msg + "\n"
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
class TestCasePlus(unittest.TestCase):
"""
This class extends `unittest.TestCase` with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
* ``pathlib`` objects (all fully resolved):
- ``test_file_path`` - the current test file path (=``__file__``)
- ``test_file_dir`` - the directory containing the current test file
- ``tests_dir`` - the directory of the ``tests`` test suite
- ``examples_dir`` - the directory of the ``examples`` test suite
- ``repo_root_dir`` - the directory of the repository
- ``src_dir`` - the directory of ``src`` (i.e. where the ``transformers`` sub-dir resides)
* stringified paths---same as above but these return paths as strings, rather than ``pathlib`` objects:
- ``test_file_path_str``
- ``test_file_dir_str``
- ``tests_dir_str``
- ``examples_dir_str``
- ``repo_root_dir_str``
- ``src_dir_str``
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
::
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
``tmp_dir`` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
::
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the ``before`` and ``after`` args, leading to the
following behavior:
``before=True``: the temporary dir will always be cleared at the beginning of the test.
``before=False``: if the temporary dir already existed, any existing files will remain there.
``after=True``: the temporary dir will always be deleted at the end of the test.
``after=False``: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of ``rm -r`` safely, only subdirs of the project repository checkout are
allowed if an explicit ``tmp_dir`` is used, so that no ``/tmp`` or similar important part of the filesystem gets
nuked by mistake, i.e. please always pass paths that start with ``./``.
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` specific to the current test suite.
This is useful for invoking external programs from the test suite - e.g. distributed training.
::
def test_whatever(self):
env = self.get_env()
"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` correctly, depending on the test suite
it's invoked from. This is useful for invoking external programs from the test suite - e.g. distributed
training.
It always inserts ``./src`` first, then ``./tests`` or ``./examples`` depending on the test suite type and
finally the preset ``PYTHONPATH`` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (:obj:`string`, `optional`):
if :obj:`None`:
- a unique temporary path will be created
- sets ``before=True`` if ``before`` is :obj:`None`
- sets ``after=True`` if ``after`` is :obj:`None`
else:
- :obj:`tmp_dir` will be created
- sets ``before=True`` if ``before`` is :obj:`None`
- sets ``after=False`` if ``after`` is :obj:`None`
before (:obj:`bool`, `optional`):
If :obj:`True` and the :obj:`tmp_dir` already exists, make sure to empty it right away. If :obj:`False`
and the :obj:`tmp_dir` already exists, any existing files will remain there.
after (:obj:`bool`, `optional`):
If :obj:`True`, delete the :obj:`tmp_dir` at the end of the test. If :obj:`False`, leave the
:obj:`tmp_dir` and its contents intact at the end of the test.
Returns:
tmp_dir(:obj:`string`): either the same value as passed via `tmp_dir` or the path to the auto-selected tmp
dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
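# A minimal usage sketch for the two features above (auto-removed tmp dirs and the
# PYTHONPATH-aware environment); the subclass, test names and the child command are
# hypothetical.
class _ExampleTestCasePlusSketch(TestCasePlus):
    def test_writes_to_tmp_dir(self):
        tmp_dir = self.get_auto_remove_tmp_dir()  # removed automatically in tearDown()
        with open(os.path.join(tmp_dir, "dummy.txt"), "w") as f:
            f.write("hello")
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "dummy.txt")))

    def test_launches_external_process(self):
        import subprocess

        env = self.get_env()  # ./src (plus ./tests or ./examples) prepended to PYTHONPATH
        cmd = [sys.executable, "-c", "import transformers; print(transformers.__file__)"]
        subprocess.run(cmd, env=env, check=True)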
def mockenv(**kwargs):
"""
this is a convenience wrapper, that allows this ::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
run_slow = os.getenv("RUN_SLOW", False)
use_tf = os.getenv("USE_TF", False)
"""
return unittest.mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the ``os.environ`` dictionary in-place. Similar to mockenv
The ``os.environ`` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
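# A minimal usage sketch: unlike the decorator form, `mockenv_context` can wrap just a
# portion of a test body and restores `os.environ` even if the block raises. The
# variable names are hypothetical.
def _example_mockenv_context():
    with mockenv_context("UNWANTED_VAR", EXAMPLE_VERBOSITY="error"):
        assert os.environ.get("EXAMPLE_VERBOSITY") == "error"
        assert "UNWANTED_VAR" not in os.environ
    # outside the block the original environment is restored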
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
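# A minimal sketch of the wrapper that a `conftest.py` is expected to define (shown
# here under a placeholder name; in practice it would be called `pytest_addoption` and
# live in tests/conftest.py or examples/conftest.py).
def _example_conftest_pytest_addoption(parser):
    pytest_addoption_shared(parser)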
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and while unlikely, it could break should
pytest do internal changes - also it calls default internal methods of terminalreporter which
can be hijacked by various `pytest-` plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = "reports"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{id}_{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
with open(report_files["passes"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
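# A minimal sketch of the matching `conftest.py` hook (placeholder name again): it reads
# the `--make-reports` prefix registered by `pytest_addoption_shared` and forwards the
# terminalreporter to the report writer above.
def _example_conftest_pytest_terminal_summary(terminalreporter):
    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)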
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
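# A minimal usage sketch, assuming it is called from a TestCasePlus test so that
# `get_env()` provides the right PYTHONPATH; the child command is a stand-in for a
# real launcher invocation.
def _example_execute_subprocess_async(test_case):
    cmd = [sys.executable, "-c", "print('hello from the child process')"]
    result = execute_subprocess_async(cmd, env=test_case.get_env())
    assert "hello from the child process" in "\n".join(result.stdout)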
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
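# A minimal sketch of how the two parsers behave for hypothetical variables: values
# accepted by `strtobool` ("1", "yes", "true", ...) count as truthy, unset variables
# fall back to the provided default.
def _example_env_parsing():
    with mock.patch.dict(os.environ, {"MY_FLAG": "yes", "MY_LIMIT": "4"}):
        assert parse_flag_from_env("MY_FLAG", default=False)
        assert parse_int_from_env("MY_LIMIT", default=None) == 4
    assert not parse_flag_from_env("MY_FLAG", default=False)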
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that control interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
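# A minimal usage sketch: applying the decorator to a (hypothetical) cross-framework
# test class, which both gates it behind RUN_PT_TF_CROSS_TESTS and attaches the
# `is_pt_tf_cross_test` pytest mark so it can be selected with `-m`.
@is_pt_tf_cross_test
class _ExamplePtTfCrossTestSketch(unittest.TestCase):
    def test_pt_and_tf_outputs_agree(self):
        # a real test would compare PyTorch and TensorFlow model outputs here
        pass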
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that control interactions between PyTorch and Flax
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
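# A minimal usage sketch combining the token decorator with availability decorators on
# a gated-model test; the class, test and checkpoint names are hypothetical.
class _ExampleGatedModelTestSketch(unittest.TestCase):
    @require_torch
    @slow
    @require_read_token
    def test_load_gated_checkpoint(self):
        from transformers import AutoConfig

        # with HF_HUB_READ_TOKEN set (e.g. on CI), the patched token is used for the hub request
        config = AutoConfig.from_pretrained("hypothetical-org/gated-model")
        self.assertIsNotNone(config)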
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch not TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or its version does not match the current
PyTorch version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when they aren't
installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(test_case)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
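# A minimal sketch of how the resolved `torch_device` is typically consumed inside a
# @require_torch test: models and inputs are placed on whatever device the environment
# selected above. `model` is assumed to be a torch.nn.Module.
def _example_use_torch_device(model):
    model = model.to(torch_device)
    inputs = torch.ones(1, 3, device=torch_device)
    return model, inputs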
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(
torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(
torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
    Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
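# Illustrative usage sketch (added for clarity, not part of the original module): output that
# repeatedly overwrites itself with `\r`, e.g. a progress bar, collapses to its final state,
# matching what `pytest -s` would show. The sample buffer below is made up.
def _example_apply_print_resets():  # pragma: no cover
    buf = "downloading  10%\rdownloading  55%\rdownloading 100%\ndone"
    assert apply_print_resets(buf) == "downloading 100%\ndone"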
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this is a not wanted behavior and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
    >>> assert cl.out == msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
                If `True` and the `tmp_dir` already exists, make sure to empty it right away. If `False` and the
                `tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
                If `True`, delete the `tmp_dir` at the end of the test. If `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
    this is a convenience wrapper that allows this::
        @mockenv(RUN_SLOW=True, USE_TF=False)
        def test_something():
            run_slow = os.getenv("RUN_SLOW", False)
            use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
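# Illustrative usage sketch (added for clarity, not part of the original module): temporarily
# add/override and remove environment variables around code that reads `os.environ` directly.
# The variable names below are made up for the example.
def _example_mockenv_context():  # pragma: no cover
    with mockenv_context("EXAMPLE_VAR_TO_REMOVE", EXAMPLE_VERBOSITY="error"):
        assert os.environ["EXAMPLE_VERBOSITY"] == "error"
        assert "EXAMPLE_VAR_TO_REMOVE" not in os.environ
    # on exit the original environment is restored in-place
    assert "EXAMPLE_VERBOSITY" not in os.environ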
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this functions taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
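# Illustrative usage sketch (added for clarity, not part of the original module): building a
# `torch.distributed` launch command inside a test so that concurrent pytest-xdist workers do
# not collide on the master port. The script name is a placeholder.
def _example_torch_dist_launch_cmd(nproc=2):  # pragma: no cover
    master_port = get_torch_dist_unique_port()
    return f"python -m torch.distributed.run --nproc_per_node={nproc} --master_port={master_port} example_script.py".split()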
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
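# Illustrative usage sketch (added for clarity, not part of the original module): comparing a
# nested, float-carrying output against a literal expectation. The values are made up.
def _example_nested_simplify():  # pragma: no cover
    raw = {"label": "POSITIVE", "score": 0.9987654321, "top_k": [0.50049, 0.25051]}
    expected = {"label": "POSITIVE", "score": 0.999, "top_k": [0.5, 0.251]}
    assert nested_simplify(raw, decimals=3) == expected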
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key one line, ident should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
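# Illustrative usage sketch (added for clarity, not part of the original module): normalizing a
# size argument that may be a single int or an already-paired (height, width) value.
def _example_to_2tuple():  # pragma: no cover
    assert to_2tuple(224) == (224, 224)
    assert to_2tuple((224, 196)) == (224, 196)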
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
    if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
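# Illustrative usage sketch (added for clarity, not part of the original module): running a tiny
# Python one-liner in a subprocess and asserting on its stdout; a failing command would surface
# as a `SubprocessCallException` carrying the captured output.
def _example_run_command():  # pragma: no cover
    out = run_command([sys.executable, "-c", "print('ok')"], return_stdout=True)
    assert out.strip() == "ok"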
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
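# Illustrative usage sketch (added for clarity, not part of the original module): retrying a test
# that fails intermittently (e.g. for timing reasons). The test class and method below are made up.
class _ExampleFlakyUsage(unittest.TestCase):  # pragma: no cover
    @is_flaky(max_attempts=3, wait_before_retry=0.5, description="made-up timing flakiness")
    def test_sometimes_slow(self):
        self.assertTrue(True)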
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
    start_method = "spawn"
    ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
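# Illustrative sketch (added for clarity, not part of the original module): the shape of a
# `target_func` compatible with `run_test_in_subprocess` - it reads its inputs from the input
# queue and reports either success or an error string through the output queue.
def _example_subprocess_target(in_queue, out_queue, timeout):  # pragma: no cover
    error = None
    try:
        inputs = in_queue.get(timeout=timeout)
        _ = (inputs or {}).get("x", 0) + 1  # stand-in for the real testing logic
    except Exception as e:
        error = str(e)
    out_queue.put({"error": error}, timeout=timeout)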
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
    its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
    CUDA usage is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
    Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
    Overwrites the `DoctestModule` of the pytest package to make sure the HfDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# instead at user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
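# Illustrative usage sketch (added for clarity, not part of the original module): device-agnostic
# seeding, cache clearing and device counting in a test body, assuming torch is available so that
# the "cpu"/"cuda" entries above are populated.
def _example_backend_helpers(device="cpu"):  # pragma: no cover
    backend_manual_seed(device, 0)  # torch.manual_seed on "cpu", torch.cuda.manual_seed on "cuda"
    backend_empty_cache(device)  # mapped to None on "cpu", so this safely returns None
    return backend_device_count(device)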
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
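# Illustrative usage sketch (added for clarity, not part of the original module): how these env
# parsers are typically used to gate optional test behavior. The variable names are made up.
_example_run_extra_checks = parse_flag_from_env("EXAMPLE_RUN_EXTRA_CHECKS", default=False)
_example_num_workers = parse_int_from_env("EXAMPLE_NUM_WORKERS", default=2)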
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env("RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
    Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
    Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
    Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
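# Illustrative usage sketch (added for clarity, not part of the original module): these
# requirement decorators are typically stacked on test classes and methods. The test below is
# made up and only demonstrates the pattern.
@require_torch
class _ExampleDecoratedTest(unittest.TestCase):  # pragma: no cover
    @slow
    def test_made_up_slow_path(self):
        self.assertTrue(is_torch_available())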
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
    These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when they aren't
installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(test_case)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
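# Illustrative sketch (assumption, not part of this module): the device selection
# above can be steered from the shell before invoking pytest, e.g.
#   TRANSFORMERS_TEST_DEVICE=cpu pytest tests/utils
#   TRANSFORMERS_TEST_BACKEND=torch_npu TRANSFORMERS_TEST_DEVICE=npu pytest tests/utils
# (`torch_npu` is only an example of an installable backend module name.)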
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
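# Illustrative sketch (not part of this module): `apply_print_resets` keeps only
# what a terminal would show after carriage-return overwrites.
def _example_apply_print_resets():
    buf = "downloading 10%\rdownloading 50%\rfinal message"
    assert apply_print_resets(buf) == "final message"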
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporarily add the given path to `sys.path`.
Usage:
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
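    # Illustrative sketch (not part of this class): typical flag combinations for
    # `get_auto_remove_tmp_dir`, as described in the docstring above.
    #   def test_whatever(self):
    #       tmp_dir = self.get_auto_remove_tmp_dir()           # unique dir, removed after the test
    #       debug_dir = self.get_auto_remove_tmp_dir("./xxx")  # fixed dir, cleared before, kept after
    #       kept_dir = self.get_auto_remove_tmp_dir("./yyy", before=False, after=False)  # left as-is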
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError("/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows this::

    @mockenv(RUN_SLOW=True, USE_TF=False)
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
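# Illustrative sketch (not part of this module): temporarily adding `RUN_SLOW`
# and removing `USE_TF` from the environment for the duration of a block.
def _example_mockenv_context_usage():
    with mockenv_context("USE_TF", RUN_SLOW="1"):
        assert os.environ["RUN_SLOW"] == "1"
        assert "USE_TF" not in os.environ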
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
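# Illustrative sketch (assumption, not part of this module): the thin wrappers a
# `conftest.py` would define so that the shared option and report helpers here
# are actually hooked into pytest.
#   def pytest_addoption(parser):
#       pytest_addoption_shared(parser)
#   def pytest_terminal_summary(terminalreporter):
#       make_reports = terminalreporter.config.getoption("--make-reports")
#       if make_reports:
#           pytest_terminal_summary_main(terminalreporter, id=make_reports)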
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and, while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
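# Illustrative sketch (not part of this module): building a distributed launch
# command whose `--master_port` stays unique per `pytest-xdist` worker. The
# script path is a placeholder.
def _example_torch_dist_launch_cmd(script_path: str, nproc_per_node: int = 2) -> List[str]:
    master_port = get_torch_dist_unique_port()
    return shlex.split(
        f"python -m torch.distributed.run --nproc_per_node={nproc_per_node} --master_port={master_port} {script_path}"
    )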
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
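# Illustrative sketch (not part of this module): marking a nondeterministic test
# for automatic retries; the description string is hypothetical.
def _example_is_flaky_usage():
    @is_flaky(max_attempts=3, wait_before_retry=0.1, description="hypothetical transient network failure")
    def test_occasionally_fails():
        # a real test body would exercise the nondeterministic behavior here
        assert True
    return test_occasionally_fails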
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_methohd = "spawn"
ctx = multiprocessing.get_context(start_methohd)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA usage is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# instead at user level.
if fn is None:
return None
return fn(*args, **kwargs)
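# Illustrative sketch (not part of this module): how the dispatch helper above
# resolves a device-specific function, falling back to the "default" entry.
def _example_device_agnostic_dispatch():
    table = {"cuda": lambda: "cuda path", "default": lambda: "generic path"}
    assert _device_agnostic_dispatch("cuda", table) == "cuda path"
    assert _device_agnostic_dispatch("xpu", table) == "generic path"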
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
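A short usage sketch of these wrappers (assuming a transformers version that ships them); every table carries a "default" entry, so the calls are safe even without torch:

```python
from transformers.testing_utils import backend_device_count, backend_manual_seed, torch_device

device = torch_device or "default"
backend_manual_seed(device, 0)       # torch.cuda.manual_seed / torch.manual_seed / no-op
print(backend_device_count(device))  # e.g. 0 on a CPU-only machine
```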
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError("Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
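For reference, a minimal sketch of a device spec file consumed through `TRANSFORMERS_TEST_DEVICE_SPEC` (the file name and function bodies are hypothetical; only `DEVICE_NAME` and the `*_FN` attribute names are dictated by the code above):

```python
# my_device_spec.py, selected via TRANSFORMERS_TEST_DEVICE_SPEC=my_device_spec.py
import torch

DEVICE_NAME = "cpu"  # becomes torch_device; must agree with TRANSFORMERS_TEST_DEVICE if set

def MANUAL_SEED_FN(seed):
    torch.manual_seed(seed)

def EMPTY_CACHE_FN():
    pass  # nothing to free on this backend

def DEVICE_COUNT_FN():
    return 1
```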
| 2024-09-07 06:23:31.477675: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-09-07 06:23:31.491926: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
2024-09-07 06:23:31.507853: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
2024-09-07 06:23:31.512633: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-09-07 06:23:31.525261: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-09-07 06:23:32.603052: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
ImportError while loading conftest '/workspace/test_repo/conftest.py'.
conftest.py:26: in <module>
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
src/transformers/testing_utils.py:54: in <module>
from .utils import (
E ImportError: cannot import name 'GGUF_MIN_VERSION' from 'transformers.utils' (/workspace/test_repo/src/transformers/utils/__init__.py)
| {'total': 17, 'passed': 6, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 3, 'warning': 8, 'error': 0} | {'total': 0, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | ./test_repo/src/transformers/testing_utils.py | ./test_repo/examples/tensorflow/test_tensorflow_examples.py |
2_transformers_callee__testing_utils__custom_tokenizers__13deb95a405bbd1037ad233c692d7fd1de9d31e3 | callee | fail-to-pass | https://github.com/huggingface/transformers | 13deb95a405bbd1037ad233c692d7fd1de9d31e3 | function | custom_tokenizers | testing_utils.py | def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped
by default. Set the RUN_CUSTOM_TOKENIZERS environment variable
to a truthy value to run them.
"""
if not _run_custom_tokenizers:
test_case = unittest.skip("test of custom tokenizers")(test_case)
return test_case
| def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
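Usage sketch for the decorator (class and test names are made up); the test only runs when RUN_CUSTOM_TOKENIZERS is set to a truthy value:

```python
import unittest

from transformers.testing_utils import custom_tokenizers


@custom_tokenizers
class SomeCustomTokenizerTest(unittest.TestCase):
    def test_tokenize(self):
        self.assertTrue(True)  # placeholder body
```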
| import os
import unittest
from distutils.util import strtobool
from transformers.file_utils import _tf_available, _torch_available
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError("If set, {} must be yes or no.".format(key))
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError("If set, {} must be a int.".format(key))
return _value
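A quick illustration of the two helpers above (values are examples; both read the environment at call time):

```python
import os

from transformers.testing_utils import parse_flag_from_env, parse_int_from_env

os.environ["RUN_SLOW"] = "yes"          # "1", "true", "t", "y", "on" also count as truthy
assert parse_flag_from_env("RUN_SLOW")  # truthy

os.environ["TF_GPU_MEMORY_LIMIT"] = "4096"
assert parse_int_from_env("TF_GPU_MEMORY_LIMIT") == 4096
```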
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable
to a truthy value to run them.
"""
if not _run_slow_tests:
test_case = unittest.skip("test is slow")(test_case)
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not _torch_available:
test_case = unittest.skip("test requires PyTorch")(test_case)
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow.
These tests are skipped when TensorFlow isn't installed.
"""
if not _tf_available:
test_case = unittest.skip("test requires TensorFlow")(test_case)
return test_case
def require_multigpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch).
These tests are skipped on a machine without multiple GPUs.
To run *only* the multigpu tests, assuming all test names contain multigpu:
$ pytest -sv ./tests -k "multigpu"
"""
if not _torch_available:
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
return test_case
if _torch_available:
# Set the USE_CUDA environment variable to select a GPU.
torch_device = "cuda" if parse_flag_from_env("USE_CUDA") else "cpu"
else:
torch_device = None
| import os
import unittest
from distutils.util import strtobool
from transformers.file_utils import _tf_available, _torch_available
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError("If set, {} must be yes or no.".format(key))
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError("If set, {} must be a int.".format(key))
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable
to a truthy value to run them.
"""
if not _run_slow_tests:
test_case = unittest.skip("test is slow")(test_case)
return test_case
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped
by default. Set the RUN_CUSTOM_TOKENIZERS environment variable
to a truthy value to run them.
"""
if not _run_custom_tokenizers:
test_case = unittest.skip("test of custom tokenizers")(test_case)
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not _torch_available:
test_case = unittest.skip("test requires PyTorch")(test_case)
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow.
These tests are skipped when TensorFlow isn't installed.
"""
if not _tf_available:
test_case = unittest.skip("test requires TensorFlow")(test_case)
return test_case
def require_multigpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch).
These tests are skipped on a machine without multiple GPUs.
To run *only* the multigpu tests, assuming all test names contain multigpu:
$ pytest -sv ./tests -k "multigpu"
"""
if not _torch_available:
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
return test_case
if _torch_available:
# Set the USE_CUDA environment variable to select a GPU.
torch_device = "cuda" if parse_flag_from_env("USE_CUDA") else "cpu"
else:
torch_device = None
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
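Hedged usage sketch for `require_read_token` (the checkpoint id is only an example of a gated repo and the body is a placeholder); with HF_HUB_READ_TOKEN set, the wrapper patches the hub token lookup for the duration of the call:

```python
from transformers.testing_utils import require_read_token, slow


@slow
@require_read_token
def test_load_gated_checkpoint():
    from transformers import AutoConfig

    AutoConfig.from_pretrained("meta-llama/Llama-2-7b-hf")  # illustrative gated model id
```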
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch not TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when they aren't
installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
test_case
)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
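Since this resolution runs at import time, the environment variables have to be set before `transformers.testing_utils` is imported; a hedged sketch (assumes torch is installed):

```python
import os

os.environ["TRANSFORMERS_TEST_DEVICE"] = "cpu"  # or "cuda", "xpu", "npu", ...

from transformers.testing_utils import torch_device

print(torch_device)  # -> "cpu"
```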
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(
torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(
torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires Cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
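Illustrative call (the sub-path is an example); note that it only works from a file that lives somewhere under a `tests` directory, since the helper walks up from the caller's `__file__`:

```python
from transformers.testing_utils import get_tests_dir

fixtures_dir = get_tests_dir("fixtures")  # resolves to <repo>/tests/fixtures
```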
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
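A quick concrete check of the `\r`-reset behaviour described in the comment block above:

```python
from transformers.testing_utils import apply_print_resets

assert apply_print_resets("step 1\rstep 2\rfinal message") == "final message"
```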
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debugging when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that no `/tmp` or similar important part of the filesystem gets
nuked by mistake, i.e. please always pass paths that start with `./`.
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all fully resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
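# Usage sketch: a minimal illustration of `get_env()` for launching an external program with a
# correctly prepared `PYTHONPATH`. It assumes the enclosing class is the `TestCasePlus` base class
# whose definition sits above this excerpt; `execute_subprocess_async` is defined further below.
import sys

class ExternalProgramTest(TestCasePlus):
    def test_print_transformers_location(self):
        env = self.get_env()  # ./src first, then ./tests or ./examples, then any preset PYTHONPATH
        cmd = [sys.executable, "-c", "import transformers; print(transformers.__file__)"]
        execute_subprocess_async(cmd, env=env)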
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
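# Usage sketch covering the three flavours described in the docstring above, assuming the
# enclosing class is named `TestCasePlus` (defined above this excerpt):
class TmpDirFlavoursTest(TestCasePlus):
    def test_tmp_dir_flavours(self):
        # 1. unique dir: empty on creation, removed after the test
        auto_dir = self.get_auto_remove_tmp_dir()
        # 2. fixed debug dir: emptied before the test, left intact afterwards
        debug_dir = self.get_auto_remove_tmp_dir("./xxx")
        # 3. explicit overrides: keep whatever is already there, still delete at the end
        keep_dir = self.get_auto_remove_tmp_dir("./xxx2", before=False, after=True)
        assert all(isinstance(d, str) for d in (auto_dir, debug_dir, keep_dir))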
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows writing this::

    @mockenv(RUN_SLOW=True, USE_TF=False)
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
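# Usage sketch: since `mockenv` is just `mock.patch.dict(os.environ, kwargs)`, it works as a
# decorator or a context manager. String values are the safe choice, because `os.environ`
# only stores strings.
@mockenv(RUN_SLOW="1", USE_TF="0")
def test_env_is_patched():
    assert os.getenv("RUN_SLOW") == "1"
    assert os.getenv("USE_TF") == "0"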
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place, similar to `mockenv`.
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
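# Usage sketch: positional args are removed, keyword args are added/updated, and `os.environ`
# is restored when the context exits. Wrapped in a helper so it has no import-time side effects.
def _mockenv_context_demo():
    os.environ["KEEP_ME"] = "original"
    with mockenv_context("KEEP_ME", NEW_VAR="42"):
        assert "KEEP_ME" not in os.environ and os.environ["NEW_VAR"] == "42"
    assert os.environ["KEEP_ME"] == "original" and "NEW_VAR" not in os.environ
    del os.environ["KEEP_ME"]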
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates the --durations and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and, while unlikely, it could break should pytest make internal
changes - it also calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
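# Usage sketch: building a launcher command whose --master_port will not collide across
# concurrent pytest-xdist workers. The script path below is a placeholder, not a real file.
def _build_dist_launcher_cmd(nproc=2, script="examples/pytorch/dummy_ddp.py"):
    master_port = get_torch_dist_unique_port()  # 29500 + xdist worker id
    return shlex.split(
        f"python -m torch.distributed.launch --nproc_per_node={nproc} --master_port={master_port} {script}"
    )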
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers and downcasting tensors/numpy arrays, to allow for simple equality
tests within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
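# Usage sketch: rounding keeps small numerical differences from breaking equality checks.
def _nested_simplify_demo():
    assert nested_simplify(0.123456) == 0.123
    assert nested_simplify({"scores": [1, 2.25049]}) == {"scores": [1, 2.25]}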
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key on its own line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
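# Usage sketch: scalars are duplicated into a pair, iterables are returned unchanged.
def _to_2tuple_demo():
    assert to_2tuple(7) == (7, 7)
    assert to_2tuple((224, 224)) == (224, 224)
    assert to_2tuple([3, 5]) == [3, 5]  # note: lists are returned as-is, not converted to tuples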
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
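# Usage sketch: a successful call can return the decoded stdout, while a failing call raises
# `SubprocessCallException` with the captured output embedded in the message.
def _run_command_demo():
    out = run_command([sys.executable, "-c", "print('ok')"], return_stdout=True)
    assert out.strip() == "ok"
    try:
        run_command([sys.executable, "-c", "raise SystemExit(1)"])
    except SubprocessCallException as e:
        assert "failed with the following error" in str(e)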
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
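# Usage sketch: the decorated test is retried up to `max_attempts` times; the final attempt runs
# outside the try/except above, so a persistent failure is still reported normally.
@is_flaky(max_attempts=3, wait_before_retry=0.5, description="illustrative flaky check")
def test_occasionally_flaky():
    assert len(os.urandom(4)) == 4  # stand-in for an assertion that only fails sporadically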
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_method = "spawn"
ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
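# Usage sketch: the target function receives (input_queue, output_queue, timeout), reads its
# inputs from the input queue, puts a dict with an "error" key on the output queue (None on
# success) and joins the queue. It must live at module level so the "spawn" start method can
# pickle it.
def _addition_target(in_queue, out_queue, timeout):
    error = None
    try:
        inputs = in_queue.get(timeout=timeout)
        assert inputs["x"] + 1 == 2
    except Exception as e:
        error = str(e)
    out_queue.put({"error": error}, timeout=timeout)
    out_queue.join()

class SubprocessExampleTest(unittest.TestCase):
    def test_addition_in_subprocess(self):
        run_test_in_subprocess(self, _addition_target, inputs={"x": 1})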
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as the printed output would otherwise fail the corresponding line.
To skip cuda tests, make sure to run `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`.
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
cuda usage is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made, as the printed output would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the `HfDocTestParser` is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device-agnostic functions have no implementation for a given device (the table entry is `None`);
# guard against that here instead of at the user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
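# Usage sketch: the same helper works on cuda, cpu, or a third-party device registered through
# TRANSFORMERS_TEST_DEVICE_SPEC (see below), because each call goes through the BACKEND_* tables.
def _reset_device_state(device: str, seed: int = 0) -> int:
    backend_manual_seed(device, seed)    # torch.manual_seed / torch.cuda.manual_seed / spec fn
    backend_empty_cache(device)          # no-op where the table entry is None (e.g. cpu)
    return backend_device_count(device)  # 0 on cpu, torch.cuda.device_count() on cuda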
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}'"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}'") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be an int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env("RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default; to run only them, set the RUN_PT_TF_CROSS_TESTS environment variable to a
truthy value and select the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default; to run only them, set the RUN_PT_FLAX_CROSS_TESTS environment variable to a
truthy value and select the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
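# Usage sketch: a test guarded by `@slow` only runs when RUN_SLOW is truthy,
# e.g. `RUN_SLOW=1 python -m pytest tests/ -k full_model`.
class SlowExampleTest(unittest.TestCase):
    @slow
    def test_full_model(self):
        self.assertTrue(True)  # stand-in for an expensive integration check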
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(test_case)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if it is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
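# Illustrative usage sketch (not part of the original module): resolving a fixture path
# relative to the tests directory; the fixture file name is a hypothetical placeholder.
def _example_get_tests_dir_usage():
    fixture_path = get_tests_dir("fixtures/dummy_config.json")
    return fixture_path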
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
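# Illustrative example (not part of the original module): progress-bar style output
# collapses to the final message once the `\r` resets are applied.
def _example_apply_print_resets():
    buf = "downloading  10%\rdownloading  50%\rdone"
    assert apply_print_resets(buf) == "done"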
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this is a not wanted behavior and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporarily add the given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError("/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
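# Illustrative usage sketch (not part of the original module): a TestCasePlus-based test
# using the resolved path accessors and an auto-removed temporary directory. The file
# name written below is a hypothetical placeholder.
class _ExampleTestCasePlusUsage(TestCasePlus):
    def test_paths_and_tmp_dir(self):
        # fully resolved path accessors provided by TestCasePlus
        assert (self.repo_root_dir / "src").is_dir()
        # unique tmp dir, removed automatically in tearDown()
        tmp_dir = self.get_auto_remove_tmp_dir()
        (Path(tmp_dir) / "dummy.txt").write_text("hello")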
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows this ::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
    run_slow = os.getenv("RUN_SLOW", False)
    use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
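# Illustrative usage sketch (not part of the original module): temporarily setting and
# removing environment variables around a block; the variable names are hypothetical
# placeholders, and `MY_FLAG` is assumed not to be set in the outer environment.
def _example_mockenv_context_usage():
    with mockenv_context("VAR_TO_REMOVE", MY_FLAG="1"):
        assert os.environ.get("MY_FLAG") == "1"
        assert "VAR_TO_REMOVE" not in os.environ
    assert "MY_FLAG" not in os.environ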
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this functions taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
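# Illustrative sketch (not part of the original module): the thin wrappers that a
# project-level conftest.py is expected to define so the two helpers above get called,
# roughly along these lines:
#   def pytest_addoption(parser):
#       pytest_addoption_shared(parser)
#   def pytest_terminal_summary(terminalreporter):
#       make_reports = terminalreporter.config.getoption("--make-reports")
#       if make_reports:
#           pytest_terminal_summary_main(terminalreporter, id=make_reports)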
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
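# Illustrative usage sketch (not part of the original module): launching a small
# distributed run from a test, with a per-xdist-worker unique port. The training script
# path and its arguments are hypothetical placeholders.
class _ExampleDistributedLaunch(TestCasePlus):
    @require_torch
    def test_two_process_launch(self):
        master_port = get_torch_dist_unique_port()
        cmd = (
            f"{sys.executable} -m torch.distributed.run --nproc_per_node=2 "
            f"--master_port={master_port} examples/dummy_distributed_script.py"
        ).split()
        execute_subprocess_async(cmd, env=self.get_env())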
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
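# Illustrative example (not part of the original module): floats are rounded to the
# requested number of decimals and nested containers are simplified element-wise.
def _example_nested_simplify():
    assert nested_simplify([1.23456, {"a": (0.1999, 3)}], decimals=3) == [1.235, {"a": (0.2, 3)}]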
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key one line, ident should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
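# Illustrative usage sketch (not part of the original module): retrying a test that fails
# intermittently; the retry settings and the description string are hypothetical.
class _ExampleFlakyTest(unittest.TestCase):
    @is_flaky(max_attempts=3, wait_before_retry=1.0, description="occasionally flaky on CI")
    def test_sometimes_flaky(self):
        pass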
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_methohd = "spawn"
ctx = multiprocessing.get_context(start_methohd)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
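# Illustrative sketch (not part of the original module): the general shape of a
# `target_func` consumed by `run_test_in_subprocess`. It reads its inputs from the input
# queue, pushes a dict with an "error" key to the output queue (the parent test fails when
# "error" is not None), and joins the queue so the parent can finish consuming it. All
# names below are hypothetical.
def _example_target_func(in_queue, out_queue, timeout):
    error = None
    result = None
    try:
        inputs = in_queue.get(timeout=timeout)
        result = (inputs or {}).get("x", 0) + 1  # the actual testing logic would go here
    except Exception as e:
        error = str(e)
    out_queue.put({"error": error, "result": result}, timeout=timeout)
    out_queue.join()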
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstring. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA usage is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the `HfDocTestParser` is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# instead at user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
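# Illustrative usage sketch (not part of the original module): the device-agnostic
# helpers above let a test seed and clean up whatever accelerator `torch_device`
# currently points to, without branching on the backend.
def _example_backend_helpers_usage():
    if not is_torch_available():
        return 0
    backend_manual_seed(torch_device, 0)
    backend_empty_cache(torch_device)
    return backend_device_count(torch_device)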
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError("Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
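# Illustrative sketch (not part of the original module): a minimal device spec file for a
# hypothetical backend, selected with TRANSFORMERS_TEST_DEVICE_SPEC=my_device_spec.py.
# Every name below (module, device name, functions) is an assumption for illustration only:
#   import my_accelerator_backend  # hypothetical backend package
#   DEVICE_NAME = "mydev"
#   MANUAL_SEED_FN = my_accelerator_backend.manual_seed
#   EMPTY_CACHE_FN = my_accelerator_backend.empty_cache
#   DEVICE_COUNT_FN = my_accelerator_backend.device_count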
| 2024-09-07 06:26:48.967734: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-09-07 06:26:48.981742: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
2024-09-07 06:26:48.997219: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
2024-09-07 06:26:49.001882: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-09-07 06:26:49.014250: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-09-07 06:26:50.134978: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
ImportError while loading conftest '/workspace/test_repo/conftest.py'.
conftest.py:26: in <module>
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
src/transformers/testing_utils.py:54: in <module>
from .utils import (
E ImportError: cannot import name 'GGUF_MIN_VERSION' from 'transformers.utils' (/workspace/test_repo/src/transformers/utils/__init__.py)
| {'total': 17, 'passed': 6, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 3, 'warning': 8, 'error': 0} | {'total': 0, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | ./test_repo/src/transformers/testing_utils.py | ./test_repo/examples/tensorflow/test_tensorflow_examples.py |
2_transformers_callee__testing_utils__require_accelerate__2fbb237967f5d1b2eb65c2131954f23a24bd29ef | callee | fail-to-pass | https://github.com/huggingface/transformers | 2fbb237967f5d1b2eb65c2131954f23a24bd29ef | function | require_accelerate | testing_utils.py | def require_accelerate(test_case):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(is_accelerate_available(), "test requires accelerate")(test_case)
| def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import inspect
import logging
import os
import re
import shlex
import shutil
import sys
import tempfile
import unittest
from collections.abc import Mapping
from distutils.util import strtobool
from io import StringIO
from pathlib import Path
from typing import Iterator, Union
from unittest import mock
from transformers import logging as transformers_logging
from .deepspeed import is_deepspeed_available
from .integrations import (
is_fairscale_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
)
from .utils import (
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_librosa_available,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_scatter_available,
is_scipy_available,
is_sentencepiece_available,
is_soundfile_availble,
is_spacy_available,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_vision_available,
)
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
PASS = "__DUMMY_TRANSFORMERS_PASS__"
ENDPOINT_STAGING = "https://moon-staging.huggingface.co"
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=False)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False)
_run_git_lfs_tests = parse_flag_from_env("RUN_GIT_LFS_TESTS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip("test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test.
Pipeline tests are skipped by default and we can run only them by setting RUN_PIPELINE_TESTS environment variable
to a truthy value and selecting the is_pipeline_test pytest mark.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip("test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
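# Usage note (illustrative, not part of the original module): slow tests are opted into
# from the environment, e.g. `RUN_SLOW=1 python -m pytest tests/ -k "my_slow_test"`,
# since `_run_slow_tests` is parsed from the RUN_SLOW flag above.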
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_git_lfs(test_case):
"""
Decorator marking a test that requires git-lfs.
git-lfs requires additional dependencies, and tests are skipped by default. Set the RUN_GIT_LFS_TESTS environment
variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_git_lfs_tests, "test of git lfs workflow")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_torch_scatter(test_case):
"""
Decorator marking a test that requires PyTorch scatter.
These tests are skipped when PyTorch scatter isn't installed.
"""
return unittest.skipUnless(is_scatter_available(), "test requires PyTorch scatter")(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
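# Illustrative usage sketch (hypothetical test names). The require_* decorators stack,
# so a test can declare several dependencies and is skipped when any one is missing.
class _ExampleStackedRequirements(unittest.TestCase):
    @require_torch
    @require_sentencepiece
    def test_sentencepiece_tokenization(self):
        self.assertTrue(True)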
def require_scipy(test_case):
"""
    Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_scatter(test_case):
"""
Decorator marking a test that requires PyTorch Scatter. These tests are skipped when PyTorch Scatter isn't
installed.
"""
return unittest.skipUnless(is_scatter_available(), "test requires PyTorch Scatter")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the required
    vision dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_tpu_available(), "test requires PyTorch TPU")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
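# Illustrative usage sketch (hypothetical test names). torch_device resolves to "cuda"
# when a GPU is visible and "cpu" otherwise, so the same test runs on any CI machine.
class _ExampleDeviceTest(unittest.TestCase):
    @require_torch
    def test_tensor_on_default_device(self):
        import torch
        t = torch.ones(2, 2, device=torch_device)
        self.assertEqual(t.shape, (2, 2))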
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.10."""
return unittest.skipUnless(
is_torch_bf16_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.10"
)(test_case)
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_fairscale(test_case):
"""
Decorator marking a test that requires fairscale
"""
return unittest.skipUnless(is_fairscale_available(), "test requires fairscale")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator for bits and bytes (bnb) dependency
"""
return unittest.skipUnless(is_bitsandbytes_available(), "test requires bnb")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
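# Illustrative sketch (the helper name is hypothetical): get_gpu_count() lets a custom
# skip condition work regardless of whether torch, tf or jax backs the test.
def _example_require_two_gpus(test_case):
    return unittest.skipUnless(get_gpu_count() >= 2, "test requires at least 2 GPUs")(test_case)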
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
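# Illustrative sketch (the fixture name is hypothetical): get_tests_dir() resolves the
# tests/ directory from the calling test file, so fixture paths work no matter which
# directory pytest is invoked from.
def _example_fixture_path():
    return os.path.join(get_tests_dir("fixtures"), "sample.json")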
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
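# Illustrative sketch (hypothetical helper): apply_print_resets() collapses
# progress-bar style output, where text is repeatedly overwritten with \r, down to the
# final message, so assertions behave the same with and without `pytest -s`.
def _example_apply_print_resets():
    buf = "downloading:  10%\rdownloading: 100%\rdownload complete"
    return apply_print_resets(buf)  # -> "download complete"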
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
    doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
        logger: `logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
    Temporarily add the given path to `sys.path`.
    Usage:
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
    In tests we often need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
                If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
                `tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
                If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
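# Illustrative usage sketch (hypothetical test names) of the two main TestCasePlus
# features: an auto-removed temporary directory and a PYTHONPATH-adjusted environment
# for launching external processes.
class _ExampleTestCasePlus(TestCasePlus):
    def test_writes_into_auto_tmp_dir(self):
        tmp_dir = self.get_auto_remove_tmp_dir()  # removed automatically in tearDown
        with open(os.path.join(tmp_dir, "dummy.txt"), "w") as f:
            f.write("hello")
        env = self.get_env()  # PYTHONPATH starts with ./src plus ./tests or ./examples
        self.assertIn("PYTHONPATH", env)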
def mockenv(**kwargs):
"""
    this is a convenience wrapper that allows writing::
        @mockenv(RUN_SLOW=True, USE_TF=False)
        def test_something():
            run_slow = os.getenv("RUN_SLOW", False)
            use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
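# Illustrative sketch (hypothetical helper): because mockenv_context patches os.environ
# in-place, the change is visible to any code that re-reads the real environment, and
# everything is restored on exit.
def _example_mockenv_context():
    with mockenv_context("CUDA_VISIBLE_DEVICES", RUN_SLOW="1"):
        assert os.environ["RUN_SLOW"] == "1"
        assert "CUDA_VISIBLE_DEVICES" not in os.environ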
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
    NB: this function taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
with open(report_files["passes"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
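# Illustrative sketch (hypothetical helper and command): launching an external Python
# process, typically combined with TestCasePlus.get_env() for the PYTHONPATH setup.
def _example_execute_subprocess(env=None):
    cmd = [sys.executable, "-c", "print('hello from a child process')"]
    return execute_subprocess_async(cmd, env=env)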
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
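# Illustrative sketch (hypothetical helper and script name): each pytest-xdist worker
# gets its own master port so concurrent distributed launches don't collide.
def _example_torch_distributed_args(script="run_example.py"):
    return [
        "-m", "torch.distributed.launch",
        "--nproc_per_node", "2",
        "--master_port", str(get_torch_dist_unique_port()),
        script,
    ]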
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import inspect
import logging
import os
import re
import shlex
import shutil
import sys
import tempfile
import unittest
from collections.abc import Mapping
from distutils.util import strtobool
from io import StringIO
from pathlib import Path
from typing import Iterator, Union
from unittest import mock
from transformers import logging as transformers_logging
from .deepspeed import is_deepspeed_available
from .integrations import (
is_fairscale_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
)
from .utils import (
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_librosa_available,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_scatter_available,
is_scipy_available,
is_sentencepiece_available,
is_soundfile_availble,
is_spacy_available,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_vision_available,
)
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
PASS = "__DUMMY_TRANSFORMERS_PASS__"
ENDPOINT_STAGING = "https://moon-staging.huggingface.co"
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
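# Illustrative sketch (hypothetical flag name): the env flags accept the spellings
# understood by distutils.util.strtobool, i.e. y/yes/t/true/on/1 and n/no/f/false/off/0.
def _example_parse_flag():
    with mock.patch.dict(os.environ, {"MY_EXAMPLE_FLAG": "yes"}):
        return parse_flag_from_env("MY_EXAMPLE_FLAG", default=False)  # -> truthy (1)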
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=False)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False)
_run_git_lfs_tests = parse_flag_from_env("RUN_GIT_LFS_TESTS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip("test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test.
Pipeline tests are skipped by default and we can run only them by setting RUN_PIPELINE_TESTS environment variable
to a truthy value and selecting the is_pipeline_test pytest mark.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip("test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_git_lfs(test_case):
"""
Decorator marking a test that requires git-lfs.
git-lfs requires additional dependencies, and tests are skipped by default. Set the RUN_GIT_LFS_TESTS environment
variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_git_lfs_tests, "test of git lfs workflow")(test_case)
def require_accelerate(test_case):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(is_accelerate_available(), "test requires accelerate")(test_case)
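# Illustrative usage sketch (hypothetical test names): tests that exercise
# accelerate-backed features are skipped automatically when accelerate isn't installed.
class _ExampleAccelerateTest(unittest.TestCase):
    @require_accelerate
    def test_accelerate_backed_loading(self):
        self.assertTrue(True)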
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_torch_scatter(test_case):
"""
Decorator marking a test that requires PyTorch scatter.
These tests are skipped when PyTorch scatter isn't installed.
"""
return unittest.skipUnless(is_scatter_available(), "test requires PyTorch scatter")(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_scipy(test_case):
"""
    Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_scatter(test_case):
"""
Decorator marking a test that requires PyTorch Scatter. These tests are skipped when PyTorch Scatter isn't
installed.
"""
return unittest.skipUnless(is_scatter_available(), "test requires PyTorch Scatter")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the required
    vision dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_tpu_available(), "test requires PyTorch TPU")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.10."""
return unittest.skipUnless(
is_torch_bf16_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.10"
)(test_case)
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_fairscale(test_case):
"""
Decorator marking a test that requires fairscale
"""
return unittest.skipUnless(is_fairscale_available(), "test requires fairscale")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator for bits and bytes (bnb) dependency
"""
return unittest.skipUnless(is_bitsandbytes_available(), "test requires bnb")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
    doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
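# Illustrative sketch: a hypothetical test body using the stream-specific subclasses defined
# above; the printed messages are made up.
def _example_capture_streams():
    with CaptureStdout() as cs:
        print("hello stdout")
    assert "hello stdout" in cs.out
    with CaptureStderr() as cs:
        print("hello stderr", file=sys.stderr)
    assert "hello stderr" in cs.err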
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
        logger: `logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
    >>> assert cl.out == msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
    This is a context manager to temporarily change the transformers modules' logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
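# Illustrative sketch: `LoggingLevel` only affects verbosity inside the `with` block;
# `logging.ERROR` is the stdlib level constant that transformers' logging reuses.
def _example_logging_level():
    with LoggingLevel(logging.ERROR):
        assert transformers_logging.get_verbosity() == logging.ERROR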
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
    Temporarily add the given path to `sys.path`.
    Usage:
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
    This is useful for debugging when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
                If `True` and the `tmp_dir` already exists, make sure to empty it right away. If `False` and the
                `tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
                If `True`, delete the `tmp_dir` at the end of the test. If `False`, leave the `tmp_dir` and its
                contents intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
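# Illustrative sketch (hypothetical test case, not part of the test suite): combining the
# auto-removed temporary dir and the resolved path accessors of `TestCasePlus`.
class _ExampleTestCasePlusUsage(TestCasePlus):
    def test_write_to_tmp_dir(self):
        tmp_dir = self.get_auto_remove_tmp_dir()  # unique dir, removed in tearDown
        with open(os.path.join(tmp_dir, "out.txt"), "w") as f:
            f.write("hello")
        # every pathlib accessor has a stringified twin
        self.assertEqual(self.repo_root_dir_str, str(self.repo_root_dir))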
def mockenv(**kwargs):
"""
    this is a convenience wrapper that allows this::
        @mockenv(RUN_SLOW="1", USE_TF="0")
        def test_something():
            run_slow = os.getenv("RUN_SLOW", False)
            use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
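# Illustrative sketch: `mockenv` as a decorator on a hypothetical test; note that environment
# values must be strings.
@mockenv(RUN_SLOW="1", USE_TF="0")
def _example_mockenv_usage():
    assert os.getenv("RUN_SLOW") == "1"
    assert os.getenv("USE_TF") == "0"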
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
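# Illustrative sketch: `mockenv_context` inside a test body; "MY_VAR" and "UNWANTED_VAR" are
# hypothetical names, and the original environment is restored on exit.
def _example_mockenv_context_usage():
    with mockenv_context("UNWANTED_VAR", MY_VAR="1"):
        assert os.getenv("MY_VAR") == "1"
        assert os.getenv("UNWANTED_VAR") is None
    # assuming MY_VAR was not set before, it is removed again on exit
    assert os.getenv("MY_VAR") is None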
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
    This function emulates the --durations and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
    NB: this function taps into a private _pytest API and, while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
with open(report_files["passes"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
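# Illustrative sketch: the thin wrappers a project `conftest.py` could define to hook the two
# helpers above into pytest. Shown as comments because they belong in conftest.py, not here:
#
#     def pytest_addoption(parser):
#         pytest_addoption_shared(parser)
#
#     def pytest_terminal_summary(terminalreporter):
#         make_reports = terminalreporter.config.getoption("--make-reports")
#         if make_reports:
#             pytest_terminal_summary_main(terminalreporter, id=make_reports)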
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            # wrap the coroutines in tasks: recent Python versions no longer accept bare
            # coroutines in `asyncio.wait`
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
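# Illustrative sketch: running a python one-liner through `execute_subprocess_async`; the
# printed message is made up.
def _example_execute_subprocess_async():
    cmd = [sys.executable, "-c", "print('hello from a subprocess')"]
    result = execute_subprocess_async(cmd, env=os.environ.copy(), echo=False)
    # stdout/stderr are returned as lists of decoded lines
    assert "hello from a subprocess" in "\n".join(result.stdout)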
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
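# Illustrative sketch: wiring the unique port into a distributed launcher command line; the
# script name below is hypothetical.
def _example_torchrun_cmd():
    return shlex.split(
        f"torchrun --nproc_per_node=2 --master_port={get_torch_dist_unique_port()} my_training_script.py"
    )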
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
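# Illustrative sketch: how the env parsing helpers behave; "MY_DEBUG_FLAG" and
# "MY_WORKER_COUNT" are hypothetical variable names used only for this example.
def _example_env_parsing():
    with mock.patch.dict(os.environ, {"MY_DEBUG_FLAG": "yes", "MY_WORKER_COUNT": "4"}):
        assert parse_flag_from_env("MY_DEBUG_FLAG", default=False)
        assert parse_int_from_env("MY_WORKER_COUNT", default=1) == 4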
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
    Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
    Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
    Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
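# Illustrative sketch: stacking markers on a hypothetical test function; the unittest runner
# skips it unless PyTorch is installed and RUN_SLOW is truthy.
@slow
@require_torch
def _example_slow_torch_test():
    import torch  # local import, mirroring how gated tests typically import torch

    assert torch.ones(1).item() == 1.0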
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
    These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
    Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
    Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
    Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when they aren't
    installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
    return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
        test_case
    )
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
    must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
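# Illustrative sketch: a hypothetical test relying on the `torch_device` resolved above and
# guarded by `require_torch_accelerator`.
@require_torch_accelerator
def _example_device_placement_test():
    import torch

    x = torch.ones(2, 2).to(torch_device)
    assert x.device.type == torch.device(torch_device).type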
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(
torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(
torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
    Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
    return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
    Decorator marking a test that requires cython.
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if it is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
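# Usage sketch (illustrative, not part of the original module): resolving a fixture path from a
# test that lives anywhere under the `tests` tree. The `fixtures/dummy.json` name is hypothetical.
def _example_get_tests_dir_usage():
    return get_tests_dir("fixtures/dummy.json")  # -> /abs/path/to/repo/tests/fixtures/dummy.json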
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
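# Minimal sketch (assumption, not from the original file) of how the two helpers above behave:
# `apply_print_resets` keeps only what survives the carriage-return overwrites, and
# `assert_screenout` checks for a substring in the cleaned, lower-cased output.
def _example_print_reset_helpers():
    buf = "downloading 10%\rdownloading 50%\rDone."
    assert apply_print_resets(buf) == "Done."
    assert_screenout(buf, "done")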
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change the transformers module's logging level to the desired value and have
it restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporarily add the given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests we often need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows this::
    @mockenv(RUN_SLOW=True, USE_TF=False)
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
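# Usage sketch (illustrative only; the variable names are hypothetical): temporarily forcing and
# removing environment variables around a block of code, with the previous state restored on exit.
def _example_mockenv_context_usage():
    with mockenv_context("SOME_VAR_TO_REMOVE", TRANSFORMERS_VERBOSITY="error"):
        pass  # code here sees the patched environment
    # on exit, `os.environ` is back to its previous state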
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and, while unlikely, it could break should pytest make internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
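# Sketch (assumption, mirroring what the docstrings above describe) of the thin wrappers a
# `conftest.py` would define so pytest picks up these helpers:
#
#   from transformers.testing_utils import pytest_addoption_shared, pytest_terminal_summary_main
#
#   def pytest_addoption(parser):
#       pytest_addoption_shared(parser)
#
#   def pytest_terminal_summary(terminalreporter):
#       make_reports = terminalreporter.config.getoption("--make-reports")
#       if make_reports:
#           pytest_terminal_summary_main(terminalreporter, id=make_reports)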
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
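# Usage sketch (illustrative; the script path and flags are hypothetical): launching an external
# script from a test with the suite's `PYTHONPATH` and collecting the captured output lines.
def _example_execute_subprocess_async_usage(test_case: TestCasePlus):
    cmd = ["python", "examples/dummy_script.py", "--do_train"]
    result = execute_subprocess_async(cmd, env=test_case.get_env())
    return "\n".join(result.stdout)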
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
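# Sketch (assumption) of how the unique port is typically fed to a distributed launcher so that
# concurrent `pytest-xdist` workers don't race for the same rendezvous port:
def _example_distributed_launch_args(num_gpus: int):
    return [
        "python",
        "-m",
        "torch.distributed.run",
        f"--nproc_per_node={num_gpus}",
        f"--master_port={get_torch_dist_unique_port()}",
    ]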
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to allow for simple equality tests
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
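# Usage sketch (illustrative; the script name is hypothetical): running a helper script and
# capturing its stdout, with `SubprocessCallException` raised on a non-zero exit code.
def _example_run_command_usage():
    return run_command(["python", "scripts/dummy_check.py"], return_stdout=True)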
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
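# Usage sketch (illustrative; the test body is a placeholder): retrying a test that occasionally
# fails for environmental reasons, waiting one second between attempts.
@is_flaky(max_attempts=3, wait_before_retry=1.0, description="hypothetical flaky network test")
def _example_flaky_test():
    pass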
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_methohd = "spawn"
ctx = multiprocessing.get_context(start_methohd)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA usage is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HfDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# instead at user level.
if fn is None:
return None
return fn(*args, **kwargs)
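# Minimal sketch (illustrative) of the dispatch mechanism: look the device name up in a table of
# callables and fall back to the "default" entry when the device has no dedicated one.
def _example_device_dispatch():
    table = {"cuda": lambda: "cuda path", "default": lambda: "generic path"}
    assert _device_agnostic_dispatch("cuda", table) == "cuda path"
    assert _device_agnostic_dispatch("cpu", table) == "generic path"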
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
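# Sketch (assumption, based only on the attribute names read above) of what a custom device spec
# file pointed to by `TRANSFORMERS_TEST_DEVICE_SPEC` could contain; every value is hypothetical.
#
#   # spec.py
#   DEVICE_NAME = "my_device"
#
#   def MANUAL_SEED_FN(seed):
#       ...
#
#   def EMPTY_CACHE_FN():
#       ...
#
#   def DEVICE_COUNT_FN():
#       return 1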
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env("RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that control interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that control interactions between PyTorch and Flax
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_TOOL_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires ggguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch not TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
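# Illustrative only (not part of the original file): a multi-GPU test is typically declared as below,
# so `pytest -k "multi_gpu"` selects it and the decorator skips it on machines with fewer than two
# GPUs. The test body is a hypothetical placeholder.
import unittest
class _ExampleMultiGpuTest(unittest.TestCase):
    @require_torch_multi_gpu
    def test_model_parallel_multi_gpu(self):
        import torch
        self.assertGreater(torch.cuda.device_count(), 1)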
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
test_case
)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
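# Hedged illustration of the device resolution above: the test device is driven entirely by
# environment variables, e.g. `TRANSFORMERS_TEST_DEVICE=cpu pytest tests/...` forces CPU even when
# CUDA is present. The standalone helper below is a simplified sketch for clarity only; it is not
# the logic the suite actually uses (which also handles NPU/XPU and custom backend specs).
def _resolve_test_device_example() -> str:
    requested = os.environ.get("TRANSFORMERS_TEST_DEVICE")
    if requested is not None:
        return requested
    if is_torch_available():
        import torch
        return "cuda" if torch.cuda.is_available() else "cpu"
    return "cpu"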
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
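# A minimal sketch of what `apply_print_resets` does to progress-bar style output: carriage returns
# overwrite earlier text, so only the final message is kept.
def _apply_print_resets_example():
    return apply_print_resets("loading 10%\rloading 100%\rdone")  # returns "done"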
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this is a not wanted behavior and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError("/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows this::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
    run_slow = os.getenv("RUN_SLOW", False)
    use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
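# Hedged usage sketch (not part of the original file): `mockenv_context` temporarily patches
# `os.environ` in-place and restores it on exit. The variable name below is hypothetical.
def _mockenv_context_example():
    with mockenv_context(EXAMPLE_TEST_FLAG="1"):
        assert os.environ["EXAMPLE_TEST_FLAG"] == "1"
    assert "EXAMPLE_TEST_FLAG" not in os.environ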
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
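# Small illustration of the offsetting above: under `pytest -n 4`, worker "gw3" gets
# 29500 + 3 = 29503, so concurrent workers never hand the same --master_port to torch.distributed.
def _unique_port_example():
    return str(get_torch_dist_unique_port())  # e.g. "29503" on worker gw3, "29500" without xdist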
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key one line, ident should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
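# A minimal usage sketch: run a trivial interpreter command and capture its stdout; a non-zero exit
# status would surface as `SubprocessCallException` with the captured output attached.
def _run_command_example():
    import sys
    return run_command([sys.executable, "-c", "print('ok')"], return_stdout=True)  # "ok\n"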
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
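# Hedged usage sketch for `is_flaky`: the wrapped callable is retried until it stops raising or
# `max_attempts` is exhausted. The counter-based transient failure below is purely illustrative.
_FLAKY_EXAMPLE_CALLS = {"count": 0}
@is_flaky(max_attempts=3, description="illustrative transient failure")
def _sometimes_flaky_check():
    _FLAKY_EXAMPLE_CALLS["count"] += 1
    if _FLAKY_EXAMPLE_CALLS["count"] < 2:
        raise RuntimeError("transient failure")
    return True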
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_methohd = "spawn"
ctx = multiprocessing.get_context(start_methohd)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
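# A sketch (assuming the queue-based protocol implemented above) of a `target_func` compatible with
# `run_test_in_subprocess`: read the inputs, report a dict containing an "error" key through the
# output queue, and join so the parent can mark the item as done. The body is a placeholder.
def _example_subprocess_target(in_queue, out_queue, timeout):
    error = None
    try:
        inputs = in_queue.get(timeout=timeout)
        _ = inputs  # the real test logic and assertions would live here
    except Exception as e:
        error = f"{e}"
    out_queue.put({"error": error}, timeout=timeout)
    out_queue.join()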
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstring. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA-related code is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
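# A short illustration of the skip behaviour documented above: with `skip_cuda_tests=True`, a
# docstring whose code block touches CUDA is reduced to an empty string, so no doctest runs for it.
def _preprocess_string_example():
    doc = "```python\n>>> import torch\n>>> torch.ones(1).to(\"cuda\")\n```"
    return preprocess_string(doc, skip_cuda_tests=True)  # returns ""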
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# instead at user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
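# Hedged usage sketch for the device-agnostic helpers above: seed and query whichever backend
# `torch_device` resolved to, falling back to the "default" entry for unknown devices.
def _backend_dispatch_example():
    if not is_torch_available():
        return 0
    backend_manual_seed(torch_device, 0)
    return backend_device_count(torch_device)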
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError("Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
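For illustration, a hypothetical `TRANSFORMERS_TEST_DEVICE_SPEC` file consistent with the loader above; the attribute names (`DEVICE_NAME`, `MANUAL_SEED_FN`, `EMPTY_CACHE_FN`, `DEVICE_COUNT_FN`) come from the code, while the file name and the device choice are made up.

```python
# my_device_spec.py  (hypothetical; point TRANSFORMERS_TEST_DEVICE_SPEC at this file)
import torch

# Must match TRANSFORMERS_TEST_DEVICE if that variable is also set.
DEVICE_NAME = "cuda"

# Each *_FN is copied into the corresponding BACKEND_* mapping by update_mapping_from_spec.
MANUAL_SEED_FN = torch.cuda.manual_seed
EMPTY_CACHE_FN = torch.cuda.empty_cache
DEVICE_COUNT_FN = torch.cuda.device_count
```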
| 2024-09-07 06:46:32.485089: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-09-07 06:46:32.499108: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
2024-09-07 06:46:32.514937: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
2024-09-07 06:46:32.519652: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-09-07 06:46:32.532163: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-09-07 06:46:33.610334: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
ImportError while loading conftest '/workspace/test_repo/conftest.py'.
conftest.py:26: in <module>
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
src/transformers/testing_utils.py:54: in <module>
from .utils import (
E ImportError: cannot import name 'GGUF_MIN_VERSION' from 'transformers.utils' (/workspace/test_repo/src/transformers/utils/__init__.py)
| {'total': 17, 'passed': 6, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 3, 'warning': 8, 'error': 0} | {'total': 0, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | ./test_repo/src/transformers/testing_utils.py | ./test_repo/examples/tensorflow/test_tensorflow_examples.py |
2_transformers_callee__testing_utils__require_gguf__a42844955f3134a2b2fd8075353c710dcfb7b362 | callee | fail-to-pass | https://github.com/huggingface/transformers | a42844955f3134a2b2fd8075353c710dcfb7b362 | function | require_gguf | testing_utils.py | def require_gguf(test_case):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(), "test requires gguf")(test_case)
| def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
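As a quick illustration of the two helpers above (the environment variable names below are hypothetical; the real consumers such as `RUN_SLOW` and `TF_GPU_MEMORY_LIMIT` follow right after):

```python
# Shell side (hypothetical):  RUN_MY_SUITE=yes MY_GPU_MEMORY_LIMIT=1024 pytest tests/
from transformers.testing_utils import parse_flag_from_env, parse_int_from_env

_run_my_suite = parse_flag_from_env("RUN_MY_SUITE", default=False)              # -> True ("yes" via strtobool)
_my_gpu_memory_limit = parse_int_from_env("MY_GPU_MEMORY_LIMIT", default=None)  # -> 1024
```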
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip("test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip("test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip("test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
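A hedged sketch of how these markers are typically stacked on a test case; the test class and its body are made up for illustration, while `slow`, `require_torch`, `require_accelerate`, and `is_accelerate_available` are the helpers defined or imported in this module.

```python
import unittest

from transformers.testing_utils import require_accelerate, require_torch, slow
from transformers.utils import is_accelerate_available


@slow                # only runs when RUN_SLOW is truthy
@require_torch       # skipped unless PyTorch is installed
@require_accelerate  # skipped unless accelerate >= ACCELERATE_MIN_VERSION is installed
class DummyAccelerateIntegrationTest(unittest.TestCase):
    def test_accelerate_is_usable(self):
        self.assertTrue(is_accelerate_available())
```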
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(test_case)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip("test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU and IPEX.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires IPEX and an XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup with IPEX and at least one XPU device. These tests are
skipped on a machine without IPEX or multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip("test requires IPEX and at least one XPU device")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(
torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(
torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip("test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires Cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
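A small sanity check of the two helpers above, with made-up progress-bar output; the greedy `^.*\r` substitution keeps only what follows the last carriage return on each line.

```python
from transformers.testing_utils import apply_print_resets, assert_screenout

# Everything up to the last "\r" on a line is discarded, mimicking an overwritten progress bar.
assert apply_print_resets("step 1/3\rstep 2/3\rfinal message") == "final message"

# assert_screenout() applies the same cleanup before its case-insensitive substring check.
assert_screenout("loading...\rAll done", "all done")
```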
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this is a not wanted behavior and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
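A hedged sketch of the pattern this docstring describes: launching an external program with the test suite's `PYTHONPATH`. The script path is hypothetical; only `get_env()` and `TestCasePlus` come from this module.

```python
import subprocess
import sys

from transformers.testing_utils import TestCasePlus


class DummyLauncherTest(TestCasePlus):
    def test_runs_external_script(self):
        env = self.get_env()  # ./src first, then ./tests or ./examples, then any preset PYTHONPATH
        subprocess.run(
            [sys.executable, "examples/pytorch/dummy_script.py"],  # hypothetical script
            env=env,
            check=True,
        )
```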
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper, that allows this ::

@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
    run_slow = os.getenv("RUN_SLOW", False)
    use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
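An illustrative use of the context manager above; the variable names are arbitrary and chosen only to show the add/remove/restore behaviour.

```python
import os

from transformers.testing_utils import mockenv_context

os.environ["KEEP_ME"] = "1"

with mockenv_context("KEEP_ME", NEW_FLAG="yes"):
    assert "KEEP_ME" not in os.environ      # temporarily removed
    assert os.environ["NEW_FLAG"] == "yes"  # temporarily added

assert os.environ["KEEP_ME"] == "1"         # removed key restored on exit
assert "NEW_FLAG" not in os.environ         # added key popped on exit (it was not set before)
```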
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this functions taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
    # Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it times out on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
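# Illustrative sketch (not part of this module): the matching conftest.py wrapper, reusing the
# `--make-reports` value registered above as the report id prefix.
#
#     def pytest_terminal_summary(terminalreporter):
#         make_reports = terminalreporter.config.getoption("--make-reports")
#         if make_reports:
#             pytest_terminal_summary_main(terminalreporter, id=make_reports)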
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
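# Illustrative usage sketch (the worker script path and its output are hypothetical): run a script
# from a test and assert on its captured, line-split output.
#
#     cmd = [sys.executable, "examples/dummy_worker.py", "--steps", "2"]
#     result = execute_subprocess_async(cmd, env=os.environ.copy())
#     assert any("done" in line for line in result.stdout)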
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
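# Illustrative sketch: feed the unique port to `torch.distributed.launch` so concurrent xdist
# workers don't collide; the training script name below is hypothetical.
#
#     master_port = get_torch_dist_unique_port()
#     launcher = f"python -m torch.distributed.launch --nproc_per_node=2 --master_port={master_port}".split()
#     cmd = launcher + ["examples/dummy_ddp.py"]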
def nested_simplify(obj, decimals=3):
"""
    Simplifies an object by rounding float numbers and downcasting tensors/numpy arrays, so that simple equality
    tests can be used within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
            # one key per line, indent should be 2, min length is 3
            assert lines[0].strip() == "{"
            for line in lines[1:-1]:
                left_indent = len(line) - len(line.lstrip())
                assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
    Runs `command` with `subprocess.check_output` and optionally returns the `stdout`. Also properly captures and
    reports any error that occurred while running `command`.
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
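# Illustrative usage sketch (hypothetical script): failures raise `SubprocessCallException` with the
# captured output embedded, which keeps CI logs readable.
#
#     stdout = run_command([sys.executable, "scripts/dummy_convert.py", "--dry-run"], return_stdout=True)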
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
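# Illustrative usage sketch (hypothetical test names): retry a network-dependent test a few times
# before reporting a failure.
#
#     class DummyHubTest(unittest.TestCase):
#         @is_flaky(max_attempts=3, wait_before_retry=2.0, description="hub can be briefly unreachable")
#         def test_download(self):
#             ...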
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_methohd = "spawn"
ctx = multiprocessing.get_context(start_methohd)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
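# Illustrative sketch of a `target_func` matching the queue usage above (an assumption drawn from
# how the queues are read and joined here, not a documented contract): read the inputs, report a
# result dict carrying an "error" key, then wait for the parent to consume it.
#
#     def _target(in_queue, out_queue, timeout):
#         error = None
#         try:
#             inputs = in_queue.get(timeout=timeout)
#             ...  # the actual (GPU-heavy) checks go here
#         except Exception as e:
#             error = str(e)
#         out_queue.put({"error": error}, timeout=timeout)
#         out_queue.join()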
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
    its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
    CUDA-related code is detected (with a heuristic), this method will return an empty string so no doctest will be
    run for `string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
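# Example (illustrative): with `skip_cuda_tests=True`, a snippet whose code lines touch cuda is
# dropped entirely and an empty string is returned.
#
#     text = '```python\n>>> import torch\n>>> model.to("cuda")\n```'
#     assert preprocess_string(text, skip_cuda_tests=True) == ""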
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
    Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# instead at user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
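# Illustrative usage sketch: test code can stay device agnostic by going through these wrappers
# instead of calling torch.cuda directly.
#
#     backend_manual_seed(torch_device, 42)
#     if backend_device_count(torch_device) < 2:
#         ...  # single-device fallback path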
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
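# Illustrative sketch of a device spec file pointed to by `TRANSFORMERS_TEST_DEVICE_SPEC` (the file
# and backend names are hypothetical): it must define `DEVICE_NAME`, and may provide the `*_FN`
# entries consumed by `update_mapping_from_spec` above.
#
#     # my_device_spec.py
#     import my_backend  # hypothetical torch backend extension
#
#     DEVICE_NAME = "mydevice"
#     MANUAL_SEED_FN = my_backend.manual_seed
#     EMPTY_CACHE_FN = my_backend.empty_cache
#     DEVICE_COUNT_FN = my_backend.device_count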
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
    PT+TF tests are skipped by default; run only them by setting the RUN_PT_TF_CROSS_TESTS environment variable to a
    truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and Flax.
    PT+FLAX tests are skipped by default; run only them by setting the RUN_PT_FLAX_CROSS_TESTS environment variable
    to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip("test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip("test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
    Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip("test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
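# Illustrative usage sketch (hypothetical test): `@slow` tests only run when RUN_SLOW is truthy,
# e.g. `RUN_SLOW=1 python -m pytest tests -k integration`.
#
#     class DummyIntegrationTest(unittest.TestCase):
#         @slow
#         def test_full_generation(self):
#             ...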
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case):
"""
    Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(), "test requires gguf")(test_case)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
    Decorator marking a test that requires fsdp. These tests are skipped when the installed torch version is older
    than `min_version`.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
    Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
return _inner
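# Illustrative usage sketch (hypothetical test): apply the decorator so a gated checkpoint is
# fetched with the token from HF_HUB_READ_TOKEN.
#
#     class DummyGatedModelTest(unittest.TestCase):
#         @require_read_token
#         def test_load_gated_checkpoint(self):
#             ...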
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch not TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
    Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
    Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
    Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
    return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
        test_case
    )
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip("test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU and IPEX.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires IPEX and an XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup with IPEX and at least one XPU device. These tests are
skipped on a machine without IPEX or multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip("test requires IPEX and at least one XPU device")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(
torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(
torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip("test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
    return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
    Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
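# Example (illustrative, hypothetical fixture path): resolve test fixtures relative to the tests
# dir regardless of where pytest was invoked from.
#
#     SAMPLE_WAV = get_tests_dir("fixtures/dummy_audio.wav")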
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
    By default each captured stream gets replayed back on the context's exit, so that one can see what the test was
    doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
    >>> assert cl.out == msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
        for up in [1, 2, 3]:
            tmp_dir = path.parents[up]
            if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
                break
        else:
            # the loop never found a parent dir containing both `src` and `tests`
            raise ValueError(
                f"can't figure out the root of the repo from {self._test_file_path}")
        self._repo_root_dir = tmp_dir
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
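    # Sketch of a typical call site (the command is illustrative): launching an external
    # process with the repo's `src` (plus `tests` or `examples`) on `PYTHONPATH`:
    #
    #   env = self.get_env()
    #   execute_subprocess_async(["python", "-c", "import transformers; print(transformers.__version__)"], env=env)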
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
                If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
                `tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
                If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its
                contents intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
    this is a convenience wrapper that allows this::

        @mockenv(RUN_SLOW=True, USE_TF=False)
        def test_something():
            run_slow = os.getenv("RUN_SLOW", False)
            use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
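# Minimal usage sketch (the variable names are illustrative): positional args are removed
# from the environment, keyword args are added/overridden, and everything is restored on exit:
#
#   with mockenv_context("HF_HOME", TRANSFORMERS_OFFLINE="1"):
#       ...  # code here sees TRANSFORMERS_OFFLINE=1 and no HF_HOME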
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this functions taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
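# Example: under `pytest -n 4`, worker `gw2` gets 29500 + 2 = 29502 while `gw0` keeps 29500,
# so concurrent distributed tests don't fight over the same port. A typical call site
# (the script path is hypothetical):
#
#   port = get_torch_dist_unique_port()
#   cmd = f"python -m torch.distributed.launch --nproc_per_node=2 --master_port={port} ./test_script.py".split()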
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
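# A small illustration of the simplification (values chosen arbitrarily): floats are rounded
# to `decimals`, containers are processed recursively, and tensors/arrays are converted via
# `.tolist()` first:
#
#   nested_simplify({"scores": [0.123456, 0.6789]})  # -> {"scores": [0.123, 0.679]}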
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
        # each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
            left_indent = len(line) - len(line.lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
    if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
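# Example (the command is illustrative): run a quick subprocess, capture its stdout, and get a
# `SubprocessCallException` carrying the combined output if it fails:
#
#   version_banner = run_command(["python", "--version"], return_stdout=True)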
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
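# Typical usage (the test body is a placeholder): retry a known-flaky test a few times
# before letting it fail for real:
#
#   @is_flaky(max_attempts=3, wait_before_retry=1.0, description="timing sensitive")
#   def test_sometimes_races(self):
#       ...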
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
    start_method = "spawn"
    ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
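# Minimal sketch of a compatible `target_func`, inferred from the queue protocol above: it
# reads its inputs from the input queue and must put a dict with an "error" key on the
# output queue (the helper name below is hypothetical).
#
#   import traceback
#
#   def _target(in_queue, out_queue, timeout):
#       error = None
#       try:
#           inputs = in_queue.get(timeout=timeout)
#           ...  # the actual assertions / model calls go here
#       except Exception:
#           error = traceback.format_exc()
#       out_queue.put({"error": error}, timeout=timeout)
#       out_queue.join()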
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
    The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
    its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
    CUDA usage is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
    Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# instead at user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
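    # Illustrative dispatch (a sketch): `backend_manual_seed("cuda", 0)` resolves to
    # `torch.cuda.manual_seed(0)` via BACKEND_MANUAL_SEED, while an unrecognized device name
    # falls back to the "default" entry until a device spec file (see below) registers it.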
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
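        # For reference, a hedged sketch of what such a spec file could look like for a
        # hypothetical "mydevice" backend (the extension module and its functions are made up
        # purely for illustration):
        #
        #   # my_device_spec.py
        #   import torch_mydevice  # hypothetical out-of-tree torch backend
        #
        #   DEVICE_NAME = "mydevice"
        #   MANUAL_SEED_FN = torch_mydevice.manual_seed
        #   EMPTY_CACHE_FN = torch_mydevice.empty_cache
        #   DEVICE_COUNT_FN = torch_mydevice.device_count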
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
    Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
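# Example: a test decorated with `@slow` is skipped in the default test run and only executes
# when the suite is invoked with e.g. `RUN_SLOW=1 pytest tests/...`:
#
#   @slow
#   def test_full_checkpoint_integration(self):
#       ...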
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
    Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
    These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
    Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
    Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
    Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
test_case
)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(
torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(
torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile.
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires Cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
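# Minimal usage sketch (illustrative only): because get_gpu_count() works regardless of which framework is
# installed, it can gate test logic without importing torch/tf/jax directly, e.g.
#
#     if get_gpu_count() < 2:
#         self.skipTest("needs at least 2 GPUs")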
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
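# Usage sketch (the fixture path below is hypothetical): resolve data files relative to the tests directory
# so tests work no matter which working directory pytest was launched from, e.g.
#
#     sample = get_tests_dir("fixtures/sample.json")  # -> "<repo>/tests/fixtures/sample.json"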
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
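# Worked example (illustrative, not in the original file): apply_print_resets() drops everything that a
# carriage return would have overwritten, so a progress-bar style buffer collapses to its final line:
#
#     apply_print_resets("downloading 10%\rdownloading 99%\rdone")  # -> "done"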
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporarily add the given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows this::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
run_slow = os.getenv("RUN_SLOW", False)
use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
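# Usage sketch (the environment variable names are only examples): unlike the decorator form above,
# mockenv_context can wrap just part of a test body, and positional arguments are *removed*:
#
#     with mockenv_context("HF_HOME", TRANSFORMERS_OFFLINE="1"):
#         ...  # HF_HOME is unset and TRANSFORMERS_OFFLINE == "1" only inside this block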
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and, while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
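# Worked example (illustrative): under `pytest -n 4`, worker "gw2" sees PYTEST_XDIST_WORKER="gw2", so
# pytest_xdist_worker_id() returns 2 and get_torch_dist_unique_port() returns 29500 + 2 = 29502, which keeps
# concurrently running distributed tests from competing for the same master port.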
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
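# Worked example (illustrative, assumes numpy is imported as np): floats are rounded and tensors/arrays are
# downcast to plain Python lists so equality assertions in tests stay stable, e.g.
#
#     nested_simplify({"score": 0.123456, "ids": np.array([1, 2])})  # -> {"score": 0.123, "ids": [1, 2]}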
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key is on its own line, the indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(line) - len(line.lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
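# Worked example (illustrative): to_2tuple() normalizes scalar-or-pair arguments such as image or patch
# sizes: to_2tuple(224) -> (224, 224), while to_2tuple((224, 196)) is returned unchanged.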
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
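# Usage sketch (the command is only an example): on failure, run_command() re-raises as
# SubprocessCallException with the captured output, which keeps CI logs readable, e.g.
#
#     out = run_command(["python", "-c", "print('ok')"], return_stdout=True)  # -> "ok\n"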
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
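# Usage sketch (test name and description are hypothetical): retry a known-flaky test a few times before
# reporting it as failed,
#
#     @is_flaky(max_attempts=3, wait_before_retry=1.0, description="occasional communication timeout")
#     def test_distributed_init(self):
#         ...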
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_method = "spawn"
ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
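# Usage sketch (the target function is hypothetical; assumes `import traceback`): the target must read the
# input queue and report a dict with an "error" key through the output queue, matching what this helper
# expects when it checks `results["error"]`:
#
#     def _target(in_queue, out_queue, timeout):
#         error = None
#         try:
#             inputs = in_queue.get(timeout=timeout)
#             assert inputs["x"] == 1
#         except Exception:
#             error = traceback.format_exc()
#         out_queue.put({"error": error}, timeout=timeout)
#         out_queue.join()
#
#     run_test_in_subprocess(self, _target, inputs={"x": 1})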
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA usage is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
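# Worked example (illustrative): given a python codeblock containing
#     >>> ds = load_dataset("dummy")
# preprocess_string() rewrites that line to
#     >>> ds = load_dataset("dummy") # doctest: +IGNORE_RESULT
# and, when skip_cuda_tests is True, returns "" for any snippet matching cuda / to(0) / device=0.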
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# instead at user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
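# Worked example (illustrative): the dispatch table above keeps tests device-agnostic, e.g.
#
#     backend_manual_seed(torch_device, 42)   # torch.cuda.manual_seed(42) on "cuda", torch.manual_seed(42) on "cpu"
#     backend_empty_cache(torch_device)       # no-op on "cpu", since that entry is None
#     n = backend_device_count(torch_device)  # 0 on "cpu", torch.cuda.device_count() on "cuda"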
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env("RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
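A minimal usage sketch (the test name and command line below are illustrative, not taken from the test suite):

```python
@is_pt_tf_cross_test
def test_pt_tf_model_equivalence(self):
    ...
```

To run only these tests: `RUN_PT_TF_CROSS_TESTS=1 pytest -m is_pt_tf_cross_test ./tests`.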
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
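A minimal usage sketch (the test name is illustrative):

```python
@slow
def test_large_model_generation(self):
    ...
```

Then run the slow tests with `RUN_SLOW=1 pytest ./tests`.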
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when the installed torch version is older than `min_version`.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
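A minimal usage sketch (the test name is illustrative):

```python
@require_torch_multi_gpu
def test_multi_gpu_data_parallel_forward(self):
    ...
```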
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
    test_case
)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires Cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
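Example (the fixture sub-path is illustrative):

```python
tests_dir = get_tests_dir()
fixtures_dir = get_tests_dir("fixtures")
```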
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not desired and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporarily add a given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
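A sketch of the typical pattern (the example script path is illustrative and assumes it exists in your checkout):

```python
env = self.get_env()
cmd = ["python", f"{self.examples_dir_str}/pytorch/text-classification/run_glue.py", "--help"]
execute_subprocess_async(cmd, env=env)
```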
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError("/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
This is a convenience wrapper that allows writing::

    @mockenv(RUN_SLOW=True, USE_TF=False)
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to `mockenv`.
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
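Example (the variable names are illustrative):

```python
with mockenv_context("VAR_TO_UNSET", RUN_SLOW="1", TRANSFORMERS_VERBOSITY="error"):
    ...  # code here sees the patched environment
# the original environment is restored on exit
```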
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
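A sketch of the expected wrapper in `conftest.py` (adapt to your conftest layout):

```python
def pytest_addoption(parser):
    pytest_addoption_shared(parser)
```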
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
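A sketch of how this is typically wired up from `conftest.py` (the option handling below is illustrative):

```python
def pytest_terminal_summary(terminalreporter):
    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
```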
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
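# Example: xdist worker "gw3" gets 29500 + 3 = 29503, so concurrent workers do not race
# for the same master port.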
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
            # each key on one line, indent should be 2, min length is 3
            assert lines[0].strip() == "{"
            for line in lines[1:-1]:
                left_indent = len(line) - len(line.lstrip())
                assert left_indent == 2
assert lines[-1].strip() == "}"
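# The accepted layout is roughly what `json.dump(obj, f, indent=2)` produces, e.g. (keys
# here are made up; only the braces and the 2-space indentation are checked):
#
#     {
#       "vocab_size": 1000,
#       "hidden_size": 64
#     }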
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
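# Examples: to_2tuple(224) -> (224, 224); to_2tuple((224, 160)) -> (224, 160).
# Any iterable (including a string) is passed through unchanged.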
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
    if an error occurred while running `command`.
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
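# Illustrative call (the command is made up):
#
#     out = run_command(["python", "-c", "print('hello')"], return_stdout=True)
#     # out == "hello\n"; a non-zero exit status raises SubprocessCallException instead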
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
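# Illustrative use (the test name is made up):
#
#     @is_flaky(max_attempts=3, wait_before_retry=0.5)
#     def test_occasionally_flaky(self):
#         ...
#
# Note that `description` is purely informational; the wrapper itself does not use it.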
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
    To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
    start_method = "spawn"
    ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
process.start()
    # Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
    # the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
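# Sketch of a compatible `target_func` (hypothetical; it only assumes the protocol visible
# above: inputs arrive on the input queue and a dict with an "error" key is expected back):
#
#     def _target(in_queue, out_queue, timeout):
#         error = None
#         try:
#             inputs = in_queue.get(timeout=timeout)
#             # ... run the actual assertions on `inputs` here ...
#         except Exception:
#             error = traceback.format_exc()  # would need `import traceback`
#         out_queue.put({"error": error}, timeout=timeout)
#         out_queue.join()  # block until the parent has consumed the result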
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
    its docstring. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
    CUDA usage is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
    Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
    # Some device agnostic functions return values. Need to guard against `None`
    # here instead of at user level.
if fn is None:
return None
return fn(*args, **kwargs)
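# Examples, assuming torch is available so the tables below are populated:
#
#     _device_agnostic_dispatch("cuda", BACKEND_MANUAL_SEED, 42)  # torch.cuda.manual_seed(42)
#     _device_agnostic_dispatch("cpu", BACKEND_EMPTY_CACHE)       # entry is None -> returns None
#     _device_agnostic_dispatch("xpu", BACKEND_DEVICE_COUNT)      # unknown key -> "default" entry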
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
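# Typical call sites pass the module-level `torch_device`, e.g. (illustrative):
#
#     backend_manual_seed(torch_device, 0)
#     n_devices = backend_device_count(torch_device)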
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError("Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
| 2024-09-07 06:49:41.406945: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-09-07 06:49:41.421049: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
2024-09-07 06:49:41.436897: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
2024-09-07 06:49:41.441686: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-09-07 06:49:41.454297: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-09-07 06:49:42.545559: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
ImportError while loading conftest '/workspace/test_repo/conftest.py'.
conftest.py:26: in <module>
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
src/transformers/testing_utils.py:54: in <module>
from .utils import (
E ImportError: cannot import name 'GGUF_MIN_VERSION' from 'transformers.utils' (/workspace/test_repo/src/transformers/utils/__init__.py)
| {'total': 17, 'passed': 6, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 3, 'warning': 8, 'error': 0} | {'total': 0, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | ./test_repo/src/transformers/testing_utils.py | ./test_repo/examples/tensorflow/test_tensorflow_examples.py |
2_transformers_callee__testing_utils__require_rjieba__626a0a01471accc32ded29ccca3ed93c4995fcd6 | callee | fail-to-pass | https://github.com/huggingface/transformers | 626a0a01471accc32ded29ccca3ed93c4995fcd6 | function | require_rjieba | testing_utils.py | def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
if not is_rjieba_available():
return unittest.skip("test requires rjieba")(test_case)
else:
return test_case
| def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import inspect
import logging
import os
import re
import shutil
import sys
import tempfile
import unittest
from distutils.util import strtobool
from io import StringIO
from pathlib import Path
from typing import Iterator, Union
from transformers import logging as transformers_logging
from .deepspeed import is_deepspeed_available
from .file_utils import (
is_datasets_available,
is_faiss_available,
is_flax_available,
is_onnx_available,
is_pandas_available,
is_rjieba_available,
is_scatter_available,
is_sentencepiece_available,
is_soundfile_availble,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_tpu_available,
is_torchaudio_available,
is_vision_available,
)
from .integrations import is_optuna_available, is_ray_available
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
PASS = "__DUMMY_TRANSFORMERS_PASS__"
ENDPOINT_STAGING = "https://moon-staging.huggingface.co"
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
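# Example: with RUN_SLOW=yes in the environment, parse_flag_from_env("RUN_SLOW") returns a
# truthy value (1, via strtobool); if the variable is unset, the `default` argument is returned.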
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=False)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False)
_run_git_lfs_tests = parse_flag_from_env("RUN_GIT_LFS_TESTS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip("test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test.
Pipeline tests are skipped by default and we can run only them by setting RUN_PIPELINE_TESTS environment variable
to a truthy value and selecting the is_pipeline_test pytest mark.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip("test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
if not _run_slow_tests:
return unittest.skip("test is slow")(test_case)
else:
return test_case
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
if not _run_custom_tokenizers:
return unittest.skip("test of custom tokenizers")(test_case)
else:
return test_case
def require_git_lfs(test_case):
"""
Decorator marking a test that requires git-lfs.
git-lfs requires additional dependencies, and tests are skipped by default. Set the RUN_GIT_LFS_TESTS environment
variable to a truthy value to run them.
"""
if not _run_git_lfs_tests:
return unittest.skip("test of git lfs workflow")(test_case)
else:
return test_case
def require_onnx(test_case):
if not is_onnx_available():
return unittest.skip("test requires ONNX")(test_case)
else:
return test_case
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
if not is_timm_available():
return unittest.skip("test requires Timm")(test_case)
else:
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
else:
return test_case
def require_torch_scatter(test_case):
"""
Decorator marking a test that requires PyTorch scatter.
These tests are skipped when PyTorch scatter isn't installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch scatter")(test_case)
else:
return test_case
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
if not is_torchaudio_available():
return unittest.skip("test requires torchaudio")(test_case)
else:
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
if not is_tf_available():
return unittest.skip("test requires TensorFlow")(test_case)
else:
return test_case
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
if not is_flax_available():
test_case = unittest.skip("test requires JAX & Flax")(test_case)
return test_case
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
if not is_sentencepiece_available():
return unittest.skip("test requires SentencePiece")(test_case)
else:
return test_case
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
if not is_tokenizers_available():
return unittest.skip("test requires tokenizers")(test_case)
else:
return test_case
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
if not is_pandas_available():
return unittest.skip("test requires pandas")(test_case)
else:
return test_case
def require_scatter(test_case):
"""
Decorator marking a test that requires PyTorch Scatter. These tests are skipped when PyTorch Scatter isn't
installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch Scatter")(test_case)
else:
return test_case
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
"""
if not is_vision_available():
return unittest.skip("test requires vision")(test_case)
else:
return test_case
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
else:
return test_case
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() > 1:
return unittest.skip("test requires 0 or 1 GPU")(test_case)
else:
return test_case
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() > 2:
return unittest.skip("test requires 0 or 1 or 2 GPUs")(test_case)
else:
return test_case
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
if not is_torch_tpu_available():
return unittest.skip("test requires PyTorch TPU")
else:
return test_case
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
if torch_device != "cuda":
return unittest.skip("test requires CUDA")(test_case)
else:
return test_case
def require_datasets(test_case):
"""Decorator marking a test that requires datasets."""
if not is_datasets_available():
return unittest.skip("test requires `datasets`")(test_case)
else:
return test_case
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
if not is_faiss_available():
return unittest.skip("test requires `faiss`")(test_case)
else:
return test_case
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
if not is_optuna_available():
return unittest.skip("test requires optuna")(test_case)
else:
return test_case
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
if not is_ray_available():
return unittest.skip("test requires Ray/tune")(test_case)
else:
return test_case
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
if not is_soundfile_availble():
return unittest.skip("test requires soundfile")(test_case)
else:
return test_case
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
if not is_deepspeed_available():
return unittest.skip("test requires deepspeed")(test_case)
else:
return test_case
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch or tf is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
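# Example (the path is made up): called from a test module that lives directly under `tests/`,
# get_tests_dir("fixtures/dummy.json") resolves to "<repo>/tests/fixtures/dummy.json".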
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
    assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout, clean it up and make it available via obj.out
- stderr, and make it available via obj.err
init arguments:
- out - capture stdout: True/False, default True
        - err - capture stderr: True/False, default True
Examples::
with CaptureStdout() as cs:
print("Secret message")
print(f"captured: {cs.out}")
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
print(f"captured: {cs.err}")
# to capture just one of the streams, but not the other
with CaptureStd(err=False) as cs:
print("Secret message")
print(f"captured: {cs.out}")
# but best use the stream-specific subclasses
"""
def __init__(self, out=True, err=True):
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
self.out = apply_print_resets(self.out_buf.getvalue())
if self.err_buf:
sys.stderr = self.err_old
self.err = self.err_buf.getvalue()
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self):
super().__init__(err=False)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self):
super().__init__(out=False)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
- logger: 'logging` logger object
Results:
The captured output is available via `self.out`
Example::
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg+"\n"
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
For example ::
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("gpt2") # calls logger.info() several times
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
    Temporarily add the given path to `sys.path`.
Usage ::
with ExtendSysPath('/path/to/dir'):
mymodule = importlib.import_module('mymodule')
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends `unittest.TestCase` with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
* ``pathlib`` objects (all fully resolved):
- ``test_file_path`` - the current test file path (=``__file__``)
- ``test_file_dir`` - the directory containing the current test file
- ``tests_dir`` - the directory of the ``tests`` test suite
- ``examples_dir`` - the directory of the ``examples`` test suite
- ``repo_root_dir`` - the directory of the repository
- ``src_dir`` - the directory of ``src`` (i.e. where the ``transformers`` sub-dir resides)
* stringified paths---same as above but these return paths as strings, rather than ``pathlib`` objects:
- ``test_file_path_str``
- ``test_file_dir_str``
- ``tests_dir_str``
- ``examples_dir_str``
- ``repo_root_dir_str``
- ``src_dir_str``
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
::
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
``tmp_dir`` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
::
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the ``before`` and ``after`` args, leading to the
following behavior:
``before=True``: the temporary dir will always be cleared at the beginning of the test.
``before=False``: if the temporary dir already existed, any existing files will remain there.
``after=True``: the temporary dir will always be deleted at the end of the test.
``after=False``: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of ``rm -r`` safely, only subdirs of the project repository checkout are
allowed if an explicit ``tmp_dir`` is used, so that by mistake no ``/tmp`` or similar important part of the
filesystem will get nuked. i.e. please always pass paths that start with ``./``
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` specific to the current test suite.
This is useful for invoking external programs from the test suite - e.g. distributed training.
::
def test_whatever(self):
env = self.get_env()
"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` correctly, depending on the test suite
it's invoked from. This is useful for invoking external programs from the test suite - e.g. distributed
training.
It always inserts ``./src`` first, then ``./tests`` or ``./examples`` depending on the test suite type and
finally the preset ``PYTHONPATH`` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (:obj:`string`, `optional`):
if :obj:`None`:
- a unique temporary path will be created
- sets ``before=True`` if ``before`` is :obj:`None`
- sets ``after=True`` if ``after`` is :obj:`None`
else:
- :obj:`tmp_dir` will be created
- sets ``before=True`` if ``before`` is :obj:`None`
- sets ``after=False`` if ``after`` is :obj:`None`
before (:obj:`bool`, `optional`):
                If :obj:`True` and the :obj:`tmp_dir` already exists, make sure to empty it right away; if :obj:`False`
and the :obj:`tmp_dir` already exists, any existing files will remain there.
after (:obj:`bool`, `optional`):
                If :obj:`True`, delete the :obj:`tmp_dir` at the end of the test; if :obj:`False`, leave the
:obj:`tmp_dir` and its contents intact at the end of the test.
Returns:
tmp_dir(:obj:`string`): either the same value as passed via `tmp_dir` or the path to the auto-selected tmp
dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
def mockenv(**kwargs):
"""
this is a convenience wrapper, that allows this ::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
run_slow = os.getenv("RUN_SLOW", False)
use_tf = os.getenv("USE_TF", False)
"""
return unittest.mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the ``os.environ`` dictionary in-place. Similar to mockenv
The ``os.environ`` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
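# Illustrative use (the variable names are arbitrary examples):
#
#     with mockenv_context("HF_HOME", TRANSFORMERS_OFFLINE="1"):
#         ...  # HF_HOME is removed and TRANSFORMERS_OFFLINE is set inside the block
#     # both are restored to their previous state on exit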
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
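# The matching conftest.py wrapper is expected to look roughly like this (sketch):
#
#     def pytest_addoption(parser):
#         pytest_addoption_shared(parser)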
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
    NB: this function taps into a private _pytest API and while unlikely, it could break should
pytest do internal changes - also it calls default internal methods of terminalreporter which
can be hijacked by various `pytest-` plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = "reports"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{id}_{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
with open(report_files["passes"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under ``pytest-xdist``'s concurrent workers ``pytest -n N`` regime,
or 0 if ``-n 1`` or ``pytest-xdist`` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to ``torch.distributed.launch``'s ``--master_port`` argument.
Under ``pytest-xdist`` it adds a delta number based on a worker id so that concurrent tests don't try to use the
same port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
from transformers.tokenization_utils import BatchEncoding
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, (dict, BatchEncoding)):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, np.float32):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import inspect
import logging
import os
import re
import shutil
import sys
import tempfile
import unittest
from distutils.util import strtobool
from io import StringIO
from pathlib import Path
from typing import Iterator, Union
from transformers import logging as transformers_logging
from .deepspeed import is_deepspeed_available
from .file_utils import (
is_datasets_available,
is_faiss_available,
is_flax_available,
is_onnx_available,
is_pandas_available,
is_rjieba_available,
is_scatter_available,
is_sentencepiece_available,
is_soundfile_availble,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_tpu_available,
is_torchaudio_available,
is_vision_available,
)
from .integrations import is_optuna_available, is_ray_available
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
PASS = "__DUMMY_TRANSFORMERS_PASS__"
ENDPOINT_STAGING = "https://moon-staging.huggingface.co"
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=False)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False)
_run_git_lfs_tests = parse_flag_from_env("RUN_GIT_LFS_TESTS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip("test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test.
Pipeline tests are skipped by default and we can run only them by setting RUN_PIPELINE_TESTS environment variable
to a truthy value and selecting the is_pipeline_test pytest mark.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip("test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
if not _run_slow_tests:
return unittest.skip("test is slow")(test_case)
else:
return test_case
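# Hedged sketch (made-up test case, not part of this module): a test marked with
# @slow only runs when RUN_SLOW is set to a truthy value in the environment.
class _ExampleSlowTests(unittest.TestCase):
    @slow
    def test_full_model_pass(self):
        self.assertTrue(True)  # stands in for an expensive end-to-end check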
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
if not _run_custom_tokenizers:
return unittest.skip("test of custom tokenizers")(test_case)
else:
return test_case
def require_git_lfs(test_case):
"""
Decorator marking a test that requires git-lfs.
git-lfs requires additional dependencies, and tests are skipped by default. Set the RUN_GIT_LFS_TESTS environment
variable to a truthy value to run them.
"""
if not _run_git_lfs_tests:
return unittest.skip("test of git lfs workflow")(test_case)
else:
return test_case
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
if not is_rjieba_available():
return unittest.skip("test requires rjieba")(test_case)
else:
return test_case
def require_onnx(test_case):
if not is_onnx_available():
return unittest.skip("test requires ONNX")(test_case)
else:
return test_case
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
if not is_timm_available():
return unittest.skip("test requires Timm")(test_case)
else:
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
else:
return test_case
def require_torch_scatter(test_case):
"""
Decorator marking a test that requires PyTorch scatter.
These tests are skipped when PyTorch scatter isn't installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch scatter")(test_case)
else:
return test_case
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
if not is_torchaudio_available():
return unittest.skip("test requires torchaudio")(test_case)
else:
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
if not is_tf_available():
return unittest.skip("test requires TensorFlow")(test_case)
else:
return test_case
def require_flax(test_case):
"""
    Decorator marking a test that requires JAX & Flax. These tests are skipped when one or both are not installed.
"""
if not is_flax_available():
test_case = unittest.skip("test requires JAX & Flax")(test_case)
return test_case
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
if not is_sentencepiece_available():
return unittest.skip("test requires SentencePiece")(test_case)
else:
return test_case
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
if not is_tokenizers_available():
return unittest.skip("test requires tokenizers")(test_case)
else:
return test_case
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
if not is_pandas_available():
return unittest.skip("test requires pandas")(test_case)
else:
return test_case
def require_scatter(test_case):
"""
Decorator marking a test that requires PyTorch Scatter. These tests are skipped when PyTorch Scatter isn't
installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch Scatter")(test_case)
else:
return test_case
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
"""
if not is_vision_available():
return unittest.skip("test requires vision")(test_case)
else:
return test_case
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
else:
return test_case
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() > 1:
return unittest.skip("test requires 0 or 1 GPU")(test_case)
else:
return test_case
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() > 2:
return unittest.skip("test requires 0 or 1 or 2 GPUs")(test_case)
else:
return test_case
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
if not is_torch_tpu_available():
        return unittest.skip("test requires PyTorch TPU")(test_case)
else:
return test_case
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
if torch_device != "cuda":
return unittest.skip("test requires CUDA")(test_case)
else:
return test_case
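# Hedged sketch (made-up test case): the markers compose, so the test below is
# skipped unless a CUDA device is visible *and* RUN_SLOW is truthy.
@require_torch_gpu
class _ExampleGpuTests(unittest.TestCase):
    @slow
    def test_runs_on_cuda(self):
        self.assertEqual(torch_device, "cuda")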
def require_datasets(test_case):
"""Decorator marking a test that requires datasets."""
if not is_datasets_available():
return unittest.skip("test requires `datasets`")(test_case)
else:
return test_case
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
if not is_faiss_available():
return unittest.skip("test requires `faiss`")(test_case)
else:
return test_case
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
if not is_optuna_available():
return unittest.skip("test requires optuna")(test_case)
else:
return test_case
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
if not is_ray_available():
return unittest.skip("test requires Ray/tune")(test_case)
else:
return test_case
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
if not is_soundfile_availble():
return unittest.skip("test requires soundfile")(test_case)
else:
return test_case
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
if not is_deepspeed_available():
return unittest.skip("test requires deepspeed")(test_case)
else:
return test_case
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch or tf is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
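# Hedged usage sketch: resolving a data file relative to the tests dir; the
# "fixtures/dummy.json" path is a made-up example.
def _example_fixture_path():
    return get_tests_dir("fixtures/dummy.json")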
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout, clean it up and make it available via obj.out
- stderr, and make it available via obj.err
init arguments:
- out - capture stdout: True/False, default True
    - err - capture stderr: True/False, default True
Examples::
with CaptureStdout() as cs:
print("Secret message")
print(f"captured: {cs.out}")
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
print(f"captured: {cs.err}")
# to capture just one of the streams, but not the other
with CaptureStd(err=False) as cs:
print("Secret message")
print(f"captured: {cs.out}")
# but best use the stream-specific subclasses
"""
def __init__(self, out=True, err=True):
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
self.out = apply_print_resets(self.out_buf.getvalue())
if self.err_buf:
sys.stderr = self.err_old
self.err = self.err_buf.getvalue()
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self):
super().__init__(err=False)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self):
super().__init__(out=False)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
- logger: 'logging` logger object
Results:
The captured output is available via `self.out`
Example::
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg+"\n"
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
For example ::
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("gpt2") # calls logger.info() several times
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
    Temporarily add the given path to `sys.path`.
Usage ::
with ExtendSysPath('/path/to/dir'):
mymodule = importlib.import_module('mymodule')
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends `unittest.TestCase` with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
    In tests, we often need to know where things are relative to the current test file, and it's not trivial since the
    test could be invoked from more than one directory or could reside in sub-directories with different depths. This
    class solves this problem by sorting out all the basic paths and providing easy accessors to them:
* ``pathlib`` objects (all fully resolved):
- ``test_file_path`` - the current test file path (=``__file__``)
- ``test_file_dir`` - the directory containing the current test file
- ``tests_dir`` - the directory of the ``tests`` test suite
- ``examples_dir`` - the directory of the ``examples`` test suite
- ``repo_root_dir`` - the directory of the repository
- ``src_dir`` - the directory of ``src`` (i.e. where the ``transformers`` sub-dir resides)
* stringified paths---same as above but these return paths as strings, rather than ``pathlib`` objects:
- ``test_file_path_str``
- ``test_file_dir_str``
- ``tests_dir_str``
- ``examples_dir_str``
- ``repo_root_dir_str``
- ``src_dir_str``
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
::
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
``tmp_dir`` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
::
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
    This is useful for debugging when you want to monitor a specific directory and make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the ``before`` and ``after`` args, leading to the
following behavior:
``before=True``: the temporary dir will always be cleared at the beginning of the test.
``before=False``: if the temporary dir already existed, any existing files will remain there.
``after=True``: the temporary dir will always be deleted at the end of the test.
``after=False``: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of ``rm -r`` safely, only subdirs of the project repository checkout are
allowed if an explicit ``tmp_dir`` is used, so that by mistake no ``/tmp`` or similar important part of the
filesystem will get nuked. i.e. please always pass paths that start with ``./``
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` specific to the current test suite.
This is useful for invoking external programs from the test suite - e.g. distributed training.
::
def test_whatever(self):
env = self.get_env()
"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` correctly, depending on the test suite
it's invoked from. This is useful for invoking external programs from the test suite - e.g. distributed
training.
It always inserts ``./src`` first, then ``./tests`` or ``./examples`` depending on the test suite type and
finally the preset ``PYTHONPATH`` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (:obj:`string`, `optional`):
if :obj:`None`:
- a unique temporary path will be created
- sets ``before=True`` if ``before`` is :obj:`None`
- sets ``after=True`` if ``after`` is :obj:`None`
else:
- :obj:`tmp_dir` will be created
- sets ``before=True`` if ``before`` is :obj:`None`
- sets ``after=False`` if ``after`` is :obj:`None`
before (:obj:`bool`, `optional`):
                If :obj:`True` and the :obj:`tmp_dir` already exists, make sure to empty it right away; if :obj:`False`
                and the :obj:`tmp_dir` already exists, any existing files will remain there.
after (:obj:`bool`, `optional`):
                If :obj:`True`, delete the :obj:`tmp_dir` at the end of the test; if :obj:`False`, leave the
                :obj:`tmp_dir` and its contents intact at the end of the test.
Returns:
tmp_dir(:obj:`string`): either the same value as passed via `tmp_dir` or the path to the auto-selected tmp
dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
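# Hedged sketch (made-up test case) of the TestCasePlus features described above:
# an auto-removed temporary dir plus an environment suitable for launching a
# subprocess from the test suite.
class _ExampleTestCasePlusUsage(TestCasePlus):
    def test_tmp_dir_and_env(self):
        tmp_dir = self.get_auto_remove_tmp_dir()  # deleted automatically in tearDown
        env = self.get_env()  # PYTHONPATH starts with ./src, then ./tests or ./examples
        self.assertTrue(os.path.isdir(tmp_dir))
        self.assertIn("PYTHONPATH", env)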
def mockenv(**kwargs):
"""
    This is a convenience wrapper that allows this ::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
run_slow = os.getenv("RUN_SLOW", False)
use_tf = os.getenv("USE_TF", False)
"""
return unittest.mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
    Temporarily updates the ``os.environ`` dictionary in-place, similar to ``mockenv``.
The ``os.environ`` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
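# Hedged usage sketch (illustrative variable names): keys passed positionally are
# removed for the duration of the block, keyword arguments are set, and the
# original environment is restored on exit.
def _example_mockenv_context():
    with mockenv_context("HF_HOME", USE_TF="0"):
        assert "HF_HOME" not in os.environ
        assert os.environ["USE_TF"] == "0"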
# --- pytest conf functions --- #
# to avoid multiple invocations from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
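# Hedged sketch of the conftest.py wrapper the docstring above refers to (this
# lives in tests/conftest.py or examples/conftest.py, not in this module):
#
#   def pytest_addoption(parser):
#       pytest_addoption_shared(parser)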
def pytest_terminal_summary_main(tr, id):
"""
    Generate multiple reports at the end of the test suite run - each report goes into a dedicated file in the current
    directory. The report files are prefixed with the test suite name.
    This function emulates the --durations and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
    NB: this function taps into a private _pytest API and, while unlikely, it could break should
    pytest make internal changes - it also calls default internal methods of terminalreporter which
    can be hijacked by various `pytest-` plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = "reports"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{id}_{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
with open(report_files["passes"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
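# Hedged sketch of the matching conftest.py hook (again defined in conftest.py,
# not here); the "--make-reports" value becomes the report-file prefix:
#
#   def pytest_terminal_summary(terminalreporter):
#       make_reports = terminalreporter.config.getoption("--make-reports")
#       if make_reports:
#           pytest_terminal_summary_main(terminalreporter, id=make_reports)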
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
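# Hedged usage sketch: launching a short-lived subprocess and checking the captured,
# line-split stdout; the command below is purely illustrative.
def _example_execute_subprocess_async():
    result = execute_subprocess_async([sys.executable, "-c", "print('hello')"])
    assert "hello" in "\n".join(result.stdout)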
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under ``pytest-xdist``'s concurrent workers ``pytest -n N`` regime,
or 0 if ``-n 1`` or ``pytest-xdist`` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to ``torch.distributed.launch``'s ``--master_port`` argument.
Under ``pytest-xdist`` it adds a delta number based on a worker id so that concurrent tests don't try to use the
same port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
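# Hedged sketch: building a launcher command around the unique port (the script
# name and argument layout are illustrative assumptions).
def _example_dist_cmd(script="some_training_script.py"):
    return [
        sys.executable,
        "-m",
        "torch.distributed.launch",
        f"--nproc_per_node={get_gpu_count()}",
        f"--master_port={get_torch_dist_unique_port()}",
        script,
    ]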
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
from transformers.tokenization_utils import BatchEncoding
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, (dict, BatchEncoding)):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, np.float32):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
            raise ValueError(f"If set, {key} must be an int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
    Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
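# Hedged sketch: binding a different minimum version for a single test with
# functools.partial (the version string is an arbitrary example).
#
#   @functools.partial(require_accelerate, min_version="0.26.0")
#   def test_needs_recent_accelerate(self):
#       ...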
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
    Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
    Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
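# Hedged sketch (made-up test body): the decorator patches huggingface_hub's token
# lookup so a gated checkpoint can be loaded in CI; the model id is illustrative.
#
#   @require_read_token
#   def test_gated_checkpoint(self):
#       model = AutoModel.from_pretrained("some-org/some-gated-model")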
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
    These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
    These tests are skipped when Intel Extension for PyTorch isn't installed or its version does not match the current
    PyTorch version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
    Decorator marking a test that requires JAX & Flax. These tests are skipped when one or both are not installed.
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
    Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
    Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
    Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
    return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
        test_case
    )
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
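# Illustrative sketch (assumes a `fixtures` folder exists under `tests/`): `get_tests_dir`
# resolves the `tests` directory relative to the calling test file and can join a sub-path.
def _example_get_tests_dir_usage():
    fixtures_dir = get_tests_dir("fixtures")  # e.g. /abs/path/to/tests/fixtures
    return fixtures_dir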
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
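# Minimal sketch of `apply_print_resets`: anything before the last carriage return on a line
# (e.g. transient progress-bar frames) is dropped, keeping only the final message.
def _example_apply_print_resets():
    buf = "downloading 10%\rdownloading 50%\rdone"
    assert apply_print_resets(buf) == "done"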
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
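# Illustrative sketch (hypothetical test class, not collected by default naming rules):
# `TestCasePlus` provides auto-removed temp dirs and resolved repo paths to its subclasses.
class _ExampleTestCasePlusUsage(TestCasePlus):
    def test_tmp_dir_is_auto_removed(self):
        tmp_dir = self.get_auto_remove_tmp_dir()  # deleted automatically in tearDown()
        self.assertTrue(os.path.isdir(tmp_dir))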
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows this ::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
    run_slow = os.getenv("RUN_SLOW", False)
    use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
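# Illustrative sketch (hypothetical function name): `mockenv` patches `os.environ` only while the
# decorated callable runs; values must be strings since they end up in the real environment.
@mockenv(RUN_SLOW="1", USE_TF="0")
def _example_mockenv_usage():
    return os.getenv("RUN_SLOW"), os.getenv("USE_TF")  # ("1", "0") inside the patched scope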
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
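# Illustrative sketch (hypothetical variable names): `mockenv_context` mutates `os.environ` in
# place for the duration of the `with` block, then restores or removes the touched keys.
def _example_mockenv_context_usage():
    with mockenv_context("SOME_UNWANTED_VAR", MASTER_ADDR="localhost", MASTER_PORT="10999"):
        pass  # code here sees the updated environment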
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this functions taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
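# Illustrative sketch (hypothetical launcher arguments): combining the unique port with a
# distributed launch command keeps concurrent pytest-xdist workers from colliding on the port.
def _example_distributed_launcher_args(nproc=2):
    return [
        "-m",
        "torch.distributed.run",
        f"--nproc_per_node={nproc}",
        f"--master_port={get_torch_dist_unique_port()}",
    ]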
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
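# Minimal sketch of `nested_simplify`: floats are rounded and containers are walked recursively,
# which makes equality assertions in tests tolerant to tiny numerical noise.
def _example_nested_simplify():
    simplified = nested_simplify({"score": 0.99987, "values": [1.00004, 2]}, decimals=3)
    assert simplified == {"score": 1.0, "values": [1.0, 2]}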
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(line) - len(line.lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
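# Minimal sketch of `to_2tuple`: scalars are duplicated, iterables are passed through unchanged.
def _example_to_2tuple():
    assert to_2tuple(7) == (7, 7)
    assert to_2tuple((3, 5)) == (3, 5)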
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
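# Illustrative sketch (assumes a POSIX `echo` binary on PATH): `run_command` wraps
# `subprocess.check_output` and surfaces stderr through `SubprocessCallException` on failure.
def _example_run_command_usage():
    out = run_command(["echo", "hello"], return_stdout=True)
    return out.strip()  # "hello"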
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
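# Illustrative sketch (hypothetical test): `is_flaky` retries a failing test a few times before
# the final attempt's outcome is allowed to stand.
@is_flaky(max_attempts=3, wait_before_retry=0.5, description="example of a racy external call")
def _example_flaky_test():
    pass  # a real test body would go here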
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_method = "spawn"
ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA usage is detected (via a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# instead at user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
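# Illustrative sketch: the device-agnostic helpers let a test seed and query whichever backend
# `torch_device` resolved to, without branching on CUDA explicitly.
def _example_device_agnostic_setup(seed=42):
    if torch_device is None:  # torch not installed
        return 0
    backend_manual_seed(torch_device, seed)
    return backend_device_count(torch_device)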
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
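# Illustrative sketch (hypothetical flag name): boolean switches for the test suite are read once
# at import time with `parse_flag_from_env`, exactly like the `_run_*` flags defined below.
_example_run_my_integration_tests = parse_flag_from_env("RUN_MY_INTEGRATION_TESTS", default=False)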
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env("RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test of interactions between PyTorch and TensorFlow.
    PT+TF tests are skipped if the RUN_PT_TF_CROSS_TESTS environment variable is set to a falsy value or if PyTorch or
    TensorFlow is not installed; they can be selected with the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
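# Illustrative sketch (not part of the original module): applying the cross-framework decorator above to a
# hypothetical test. The test class and its body are placeholders; a real test would compare PT and TF outputs.
class _ExampleCrossFrameworkTest(unittest.TestCase):
    @is_pt_tf_cross_test
    def test_pt_tf_equivalence(self):
        # Stand-in assertion; the decorator skips this test unless both frameworks are usable.
        self.assertEqual(1 + 1, 2)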
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test of interactions between PyTorch and Flax.
    PT+FLAX tests are skipped if the RUN_PT_FLAX_CROSS_TESTS environment variable is set to a falsy value or if PyTorch
    or Flax is not installed; they can be selected with the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
    Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
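# Illustrative sketch (not part of the original module): marking a hypothetical long-running test with `@slow`
# so it only runs when RUN_SLOW is set to a truthy value.
class _ExampleSlowTest(unittest.TestCase):
    @slow
    def test_full_integration(self):
        # A real slow test would e.g. download a checkpoint and run generation; this stand-in just passes.
        self.assertTrue(True)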
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
    Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
    Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
    These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
    Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
    Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
    Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
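# Illustrative sketch (not part of the original module): a hypothetical multi-GPU test guarded by the decorator
# above, runnable with `pytest -sv ./tests -k "multi_gpu"` as described in its docstring.
class _ExampleMultiGpuTest(unittest.TestCase):
    @require_torch_multi_gpu
    def test_data_parallel_multi_gpu(self):
        import torch

        # The decorator guarantees at least two CUDA devices are visible when this body runs.
        self.assertGreater(torch.cuda.device_count(), 1)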
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
    return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
        test_case
    )
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
    must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
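# Illustrative sketch (not part of the original module): how the `torch_device` resolved above is typically
# consumed inside a test body. The tensor below is hypothetical.
def _example_use_torch_device():
    if not is_torch_available():
        return None
    import torch

    # Place a small tensor on whatever device the test environment selected (cuda, cpu, npu, xpu, ...).
    return torch.zeros(2, 2).to(torch_device)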
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
    Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
    return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
    Decorator marking a test that requires Cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
        The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally, `append_path` is
        joined after the `tests` dir if it is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
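# Illustrative sketch (not part of the original module): how a test module living under `tests/` would resolve a
# fixture path with `get_tests_dir`. The fixture file name is hypothetical; note that the helper inspects the
# caller's `__file__`, so it is meant to be called from a file inside the `tests` tree.
def _example_fixture_path():
    return get_tests_dir("fixtures/dummy_file.txt")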
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
    assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
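# Illustrative sketch (not part of the original module): the `\r` handling described above turns transient
# progress-bar output into just the final message.
def _example_apply_print_resets():
    buf = "downloading 10%\rdownloading 50%\rdone"
    # Everything up to and including the last carriage return is dropped, leaving "done".
    return apply_print_resets(buf)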
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this is a not wanted behavior and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
    Temporarily add the given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
    This is useful for debugging when you want to monitor a specific directory and make sure the previous tests didn't
    leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
                If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
                `tmp_dir` already exists, any existing files will remain there.
            after (`bool`, *optional*):
                If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its
                contents intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError("/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
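# Illustrative sketch (not part of the original module): a hypothetical test built on `TestCasePlus`, using the
# auto-removed temporary dir and the PYTHONPATH-aware environment described in the class docstring above.
class _ExampleTestCasePlusUsage(TestCasePlus):
    def test_writes_into_tmp_dir(self):
        tmp_dir = self.get_auto_remove_tmp_dir()  # unique dir, deleted automatically in tearDown()
        with open(os.path.join(tmp_dir, "dummy.txt"), "w") as f:
            f.write("hello")
        env = self.get_env()  # os.environ copy with ./src and ./tests (or ./examples) prepended to PYTHONPATH
        self.assertIn("PYTHONPATH", env)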
def mockenv(**kwargs):
"""
    this is a convenience wrapper that allows this::

        @mockenv(RUN_SLOW=True, USE_TF=False)
        def test_something():
            run_slow = os.getenv("RUN_SLOW", False)
            use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
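# Illustrative sketch (not part of the original module): temporarily overriding and removing environment variables
# with the context manager above. The variable names are hypothetical.
def _example_mockenv_context():
    with mockenv_context("SOME_VAR_TO_REMOVE", ANOTHER_VAR="1"):
        assert os.environ.get("SOME_VAR_TO_REMOVE") is None
        assert os.environ["ANOTHER_VAR"] == "1"
    # On exit the original values (or absence) of both variables are restored.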
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
    This function emulates the --durations and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
    NB: this function taps into a private _pytest API and while unlikely, it could break should pytest make internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
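# Illustrative sketch (not part of the original module): passing the per-worker port above to a hypothetical
# torch.distributed launcher command, so that concurrent pytest-xdist workers don't race for the same port.
def _example_distributed_cmd():
    master_port = get_torch_dist_unique_port()
    return f"python -m torch.distributed.run --nproc_per_node=2 --master_port={master_port} train.py".split()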
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
        # each key on its own line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
    if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
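# Illustrative sketch (not part of the library): run_command returns decoded stdout on
# success and raises SubprocessCallException with the captured output on failure.
def _example_run_command():
    out = run_command([sys.executable, "-c", "print('hello')"], return_stdout=True)
    assert out.strip() == "hello"
    try:
        run_command([sys.executable, "-c", "import sys; sys.exit(1)"])
    except SubprocessCallException as e:
        print("captured failure:", e)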
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
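# Illustrative sketch (not part of the library): a test decorated with is_flaky is retried up
# to max_attempts times; only the final attempt is allowed to propagate its failure.
def _example_is_flaky():
    attempts = {"n": 0}

    @is_flaky(max_attempts=3)
    def sometimes_fails():
        attempts["n"] += 1
        if attempts["n"] < 3:
            raise AssertionError("flaky failure")
        return "ok"

    assert sometimes_fails() == "ok"
    assert attempts["n"] == 3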
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
    start_method = "spawn"
    ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
    # the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
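# Illustrative sketch (not part of the library) of the shape a `target_func` is expected to
# have: read the inputs from the input queue, put a dict with an "error" key on the output
# queue, and wait for the parent to consume it. All names below are hypothetical.
def _example_subprocess_target(in_queue, out_queue, timeout):
    error = None
    try:
        inputs = in_queue.get(timeout=timeout)
        _ = inputs  # the actual assertions on `inputs` would go here
    except Exception:
        import traceback

        error = traceback.format_exc()
    out_queue.put({"error": error}, timeout=timeout)
    out_queue.join()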
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as the printed result would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
    its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
    CUDA-related code is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
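# Illustrative sketch (not part of the library): preprocess_string appends the
# `# doctest: +IGNORE_RESULT` marker to `load_dataset(...)` lines and blanks out the whole
# string when a CUDA-looking snippet is found while CUDA doctests should be skipped.
def _example_preprocess_string():
    doc = "```python\n>>> import datasets\n>>> ds = load_dataset('dummy')\n```"
    assert "# doctest: +IGNORE_RESULT" in preprocess_string(doc, skip_cuda_tests=False)

    cuda_doc = "```python\n>>> import torch\n>>> model.to(0)\n```"
    assert preprocess_string(cuda_doc, skip_cuda_tests=True) == ""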
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
    Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
    # Some entries in the dispatch table are `None` (a no-op for that device); guard against
    # that here instead of forcing every caller to check.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
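# Illustrative sketch (not part of the library): tests call the backend_* wrappers with the
# resolved device string, so the same test body works on CUDA, CPU, or a custom accelerator
# registered through a device spec file.
def _example_device_agnostic_dispatch():
    if is_torch_available():
        backend_manual_seed("cpu", 42)  # dispatches to torch.manual_seed
        assert backend_device_count("cpu") == 0  # the "cpu" entry reports no accelerators
        backend_empty_cache("cpu")  # mapped to None for "cpu", so this is a no-op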
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError("Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
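# Illustrative sketch (not part of the library): a minimal, hypothetical device spec file
# that TRANSFORMERS_TEST_DEVICE_SPEC could point to. It must define DEVICE_NAME and may
# provide the *_FN attributes consumed by update_mapping_from_spec above.
#
#   # my_npu_spec.py (hypothetical file and backend)
#   import my_npu_framework as npu
#
#   DEVICE_NAME = "npu"
#   MANUAL_SEED_FN = npu.manual_seed
#   EMPTY_CACHE_FN = npu.empty_cache
#   DEVICE_COUNT_FN = npu.device_count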
| 2024-09-07 07:00:09.591942: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-09-07 07:00:09.607684: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
2024-09-07 07:00:09.623846: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
2024-09-07 07:00:09.628741: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-09-07 07:00:09.641662: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-09-07 07:00:10.746940: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
ImportError while loading conftest '/workspace/test_repo/conftest.py'.
conftest.py:26: in <module>
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
src/transformers/testing_utils.py:54: in <module>
from .utils import (
E ImportError: cannot import name 'GGUF_MIN_VERSION' from 'transformers.utils' (/workspace/test_repo/src/transformers/utils/__init__.py)
| {'total': 17, 'passed': 6, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 3, 'warning': 8, 'error': 0} | {'total': 0, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | ./test_repo/src/transformers/testing_utils.py | ./test_repo/examples/tensorflow/test_tensorflow_examples.py |
2_transformers_callee__testing_utils__require_tf2onnx__ebc4edfe7a4dfb820bf7f4faa02fb6de479b6e36 | callee | fail-to-pass | https://github.com/huggingface/transformers | ebc4edfe7a4dfb820bf7f4faa02fb6de479b6e36 | function | require_tf2onnx | testing_utils.py | def require_tf2onnx(test_case):
if not is_tf2onnx_available():
return unittest.skip("test requires tf2onnx")(test_case)
else:
return test_case
| def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import inspect
import logging
import os
import re
import shutil
import sys
import tempfile
import unittest
from distutils.util import strtobool
from io import StringIO
from pathlib import Path
from typing import Iterator, Union
from unittest import mock
from transformers import logging as transformers_logging
from .deepspeed import is_deepspeed_available
from .file_utils import (
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_librosa_available,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_scatter_available,
is_sentencepiece_available,
is_soundfile_availble,
is_spacy_available,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_vision_available,
)
from .integrations import is_optuna_available, is_ray_available, is_sigopt_available
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
PASS = "__DUMMY_TRANSFORMERS_PASS__"
ENDPOINT_STAGING = "https://moon-staging.huggingface.co"
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=False)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False)
_run_git_lfs_tests = parse_flag_from_env("RUN_GIT_LFS_TESTS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip("test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test.
Pipeline tests are skipped by default and we can run only them by setting RUN_PIPELINE_TESTS environment variable
to a truthy value and selecting the is_pipeline_test pytest mark.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip("test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
if not _run_slow_tests:
return unittest.skip("test is slow")(test_case)
else:
return test_case
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
if not _run_custom_tokenizers:
return unittest.skip("test of custom tokenizers")(test_case)
else:
return test_case
def require_git_lfs(test_case):
"""
Decorator marking a test that requires git-lfs.
git-lfs requires additional dependencies, and tests are skipped by default. Set the RUN_GIT_LFS_TESTS environment
variable to a truthy value to run them.
"""
if not _run_git_lfs_tests:
return unittest.skip("test of git lfs workflow")(test_case)
else:
return test_case
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
if not is_rjieba_available():
return unittest.skip("test requires rjieba")(test_case)
else:
return test_case
def require_onnx(test_case):
if not is_onnx_available():
return unittest.skip("test requires ONNX")(test_case)
else:
return test_case
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
if not is_timm_available():
return unittest.skip("test requires Timm")(test_case)
else:
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
else:
return test_case
def require_torch_scatter(test_case):
"""
Decorator marking a test that requires PyTorch scatter.
These tests are skipped when PyTorch scatter isn't installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch scatter")(test_case)
else:
return test_case
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
if not is_tensorflow_probability_available():
return unittest.skip("test requires TensorFlow probability")(test_case)
else:
return test_case
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
if not is_torchaudio_available():
return unittest.skip("test requires torchaudio")(test_case)
else:
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
if not is_tf_available():
return unittest.skip("test requires TensorFlow")(test_case)
else:
return test_case
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
if not is_flax_available():
test_case = unittest.skip("test requires JAX & Flax")(test_case)
return test_case
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
if not is_sentencepiece_available():
return unittest.skip("test requires SentencePiece")(test_case)
else:
return test_case
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
if not is_tokenizers_available():
return unittest.skip("test requires tokenizers")(test_case)
else:
return test_case
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
if not is_pandas_available():
return unittest.skip("test requires pandas")(test_case)
else:
return test_case
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
if not is_pytesseract_available():
return unittest.skip("test requires PyTesseract")(test_case)
else:
return test_case
def require_scatter(test_case):
"""
Decorator marking a test that requires PyTorch Scatter. These tests are skipped when PyTorch Scatter isn't
installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch Scatter")(test_case)
else:
return test_case
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
if not is_pytorch_quantization_available():
return unittest.skip("test requires PyTorch Quantization Toolkit")(test_case)
else:
return test_case
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
"""
if not is_vision_available():
return unittest.skip("test requires vision")(test_case)
else:
return test_case
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
if not is_ftfy_available():
return unittest.skip("test requires ftfy")(test_case)
else:
return test_case
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
if not is_spacy_available():
return unittest.skip("test requires spacy")(test_case)
else:
return test_case
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
else:
return test_case
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() > 1:
return unittest.skip("test requires 0 or 1 GPU")(test_case)
else:
return test_case
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() > 2:
return unittest.skip("test requires 0 or 1 or 2 GPUs")(test_case)
else:
return test_case
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
if not is_torch_tpu_available():
return unittest.skip("test requires PyTorch TPU")
else:
return test_case
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
if torch_device != "cuda":
return unittest.skip("test requires CUDA")(test_case)
else:
return test_case
def require_torch_bf16(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.10."""
if not is_torch_bf16_available():
return unittest.skip("test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.10")(test_case)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
if not is_torch_tf32_available():
return unittest.skip("test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7")(test_case)
else:
return test_case
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
if not is_detectron2_available():
return unittest.skip("test requires `detectron2`")(test_case)
else:
return test_case
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
if not is_faiss_available():
return unittest.skip("test requires `faiss`")(test_case)
else:
return test_case
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
if not is_optuna_available():
return unittest.skip("test requires optuna")(test_case)
else:
return test_case
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
if not is_ray_available():
return unittest.skip("test requires Ray/tune")(test_case)
else:
return test_case
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
if not is_sigopt_available():
return unittest.skip("test requires SigOpt")(test_case)
else:
return test_case
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
if not is_soundfile_availble():
return unittest.skip("test requires soundfile")(test_case)
else:
return test_case
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
if not is_deepspeed_available():
return unittest.skip("test requires deepspeed")(test_case)
else:
return test_case
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
if not is_phonemizer_available():
return unittest.skip("test requires phonemizer")(test_case)
else:
return test_case
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
if not is_pyctcdecode_available():
return unittest.skip("test requires pyctcdecode")(test_case)
else:
return test_case
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
if not is_librosa_available():
return unittest.skip("test requires librosa")(test_case)
else:
return test_case
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
    assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
    doing. If this behavior is not desired and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
    This is a context manager to temporarily change the transformers modules' logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
    Temporarily add a given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
                If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
                `tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
                If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
def mockenv(**kwargs):
"""
    this is a convenience wrapper, that allows this ::

        @mockenv(RUN_SLOW=True, USE_TF=False)
        def test_something():
            run_slow = os.getenv("RUN_SLOW", False)
            use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
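# Illustrative sketch (not part of the library): mockenv_context patches os.environ in place
# and restores it on exit, removing keys it added. The variable name is hypothetical and
# assumed not to be set beforehand.
def _example_mockenv_context():
    with mockenv_context(SOME_HYPOTHETICAL_VAR="1"):
        assert os.environ["SOME_HYPOTHETICAL_VAR"] == "1"
    assert "SOME_HYPOTHETICAL_VAR" not in os.environ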
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
    This function emulates the --durations and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
    NB: this function taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = "reports"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{id}_{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
with open(report_files["passes"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
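# Illustrative sketch (not part of the library): the thin wrappers a conftest.py is expected
# to define so that the two helpers above are actually hooked into pytest. The exact wiring
# below is an assumption about how a conftest.py would use them.
#
#   # conftest.py (hypothetical)
#   from transformers.testing_utils import pytest_addoption_shared, pytest_terminal_summary_main
#
#   def pytest_addoption(parser):
#       pytest_addoption_shared(parser)
#
#   def pytest_terminal_summary(terminalreporter):
#       make_reports = terminalreporter.config.getoption("--make-reports")
#       if make_reports:
#           pytest_terminal_summary_main(terminalreporter, id=make_reports)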
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
from transformers.tokenization_utils import BatchEncoding
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, (dict, BatchEncoding)):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import inspect
import logging
import os
import re
import shutil
import sys
import tempfile
import unittest
from distutils.util import strtobool
from io import StringIO
from pathlib import Path
from typing import Iterator, Union
from unittest import mock
from transformers import logging as transformers_logging
from .deepspeed import is_deepspeed_available
from .file_utils import (
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_librosa_available,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_scatter_available,
is_sentencepiece_available,
is_soundfile_availble,
is_spacy_available,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_vision_available,
)
from .integrations import is_optuna_available, is_ray_available, is_sigopt_available
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
PASS = "__DUMMY_TRANSFORMERS_PASS__"
ENDPOINT_STAGING = "https://moon-staging.huggingface.co"
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
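# Example (illustrative sketch, hypothetical variable names): the module-level flags below are
# built exactly this way; `strtobool` accepts the usual spellings ("yes"/"no", "true"/"false",
# "1"/"0"), and anything else raises the ValueError above.
# _run_my_suite = parse_flag_from_env("RUN_MY_SUITE", default=False)
# _my_gpu_memory_limit = parse_int_from_env("MY_GPU_MEMORY_LIMIT", default=None)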
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=False)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False)
_run_git_lfs_tests = parse_flag_from_env("RUN_GIT_LFS_TESTS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
    PT+TF tests are skipped by default and we can run only them by setting the RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and Flax.
    PT+FLAX tests are skipped by default and we can run only them by setting the RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip("test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test.
Pipeline tests are skipped by default and we can run only them by setting RUN_PIPELINE_TESTS environment variable
to a truthy value and selecting the is_pipeline_test pytest mark.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip("test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
if not _run_slow_tests:
return unittest.skip("test is slow")(test_case)
else:
return test_case
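# Example (illustrative sketch; the test below is hypothetical): a test decorated with `@slow`
# only runs when RUN_SLOW is set to a truthy value, e.g. `RUN_SLOW=1 pytest tests -k integration`.
@slow
def _example_slow_integration_test():
    # a long-running end-to-end check would live here
    pass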
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
if not _run_custom_tokenizers:
return unittest.skip("test of custom tokenizers")(test_case)
else:
return test_case
def require_git_lfs(test_case):
"""
Decorator marking a test that requires git-lfs.
git-lfs requires additional dependencies, and tests are skipped by default. Set the RUN_GIT_LFS_TESTS environment
variable to a truthy value to run them.
"""
if not _run_git_lfs_tests:
return unittest.skip("test of git lfs workflow")(test_case)
else:
return test_case
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
if not is_rjieba_available():
return unittest.skip("test requires rjieba")(test_case)
else:
return test_case
def require_tf2onnx(test_case):
if not is_tf2onnx_available():
return unittest.skip("test requires tf2onnx")(test_case)
else:
return test_case
def require_onnx(test_case):
if not is_onnx_available():
return unittest.skip("test requires ONNX")(test_case)
else:
return test_case
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
if not is_timm_available():
return unittest.skip("test requires Timm")(test_case)
else:
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
else:
return test_case
def require_torch_scatter(test_case):
"""
Decorator marking a test that requires PyTorch scatter.
These tests are skipped when PyTorch scatter isn't installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch scatter")(test_case)
else:
return test_case
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
if not is_tensorflow_probability_available():
return unittest.skip("test requires TensorFlow probability")(test_case)
else:
return test_case
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
if not is_torchaudio_available():
return unittest.skip("test requires torchaudio")(test_case)
else:
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
if not is_tf_available():
return unittest.skip("test requires TensorFlow")(test_case)
else:
return test_case
def require_flax(test_case):
"""
    Decorator marking a test that requires JAX & Flax. These tests are skipped when one or both are not installed.
"""
if not is_flax_available():
test_case = unittest.skip("test requires JAX & Flax")(test_case)
return test_case
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
if not is_sentencepiece_available():
return unittest.skip("test requires SentencePiece")(test_case)
else:
return test_case
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
if not is_tokenizers_available():
return unittest.skip("test requires tokenizers")(test_case)
else:
return test_case
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
if not is_pandas_available():
return unittest.skip("test requires pandas")(test_case)
else:
return test_case
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
if not is_pytesseract_available():
return unittest.skip("test requires PyTesseract")(test_case)
else:
return test_case
def require_scatter(test_case):
"""
Decorator marking a test that requires PyTorch Scatter. These tests are skipped when PyTorch Scatter isn't
installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch Scatter")(test_case)
else:
return test_case
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
if not is_pytorch_quantization_available():
return unittest.skip("test requires PyTorch Quantization Toolkit")(test_case)
else:
return test_case
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
"""
if not is_vision_available():
return unittest.skip("test requires vision")(test_case)
else:
return test_case
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
if not is_ftfy_available():
return unittest.skip("test requires ftfy")(test_case)
else:
return test_case
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
if not is_spacy_available():
return unittest.skip("test requires spacy")(test_case)
else:
return test_case
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
else:
return test_case
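# Example (illustrative sketch; the test below is hypothetical): requirement decorators compose,
# so a test can demand a multi-GPU PyTorch setup and then be selected with
# `pytest -sv ./tests -k "multi_gpu"`.
@require_torch_multi_gpu
def _example_multi_gpu_data_parallel_test():
    # the body would move a model to several devices and compare outputs
    pass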
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() > 1:
return unittest.skip("test requires 0 or 1 GPU")(test_case)
else:
return test_case
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() > 2:
return unittest.skip("test requires 0 or 1 or 2 GPUs")(test_case)
else:
return test_case
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
if not is_torch_tpu_available():
return unittest.skip("test requires PyTorch TPU")
else:
return test_case
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
if torch_device != "cuda":
return unittest.skip("test requires CUDA")(test_case)
else:
return test_case
def require_torch_bf16(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.10."""
if not is_torch_bf16_available():
return unittest.skip("test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.10")(test_case)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
if not is_torch_tf32_available():
return unittest.skip("test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7")(test_case)
else:
return test_case
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
if not is_detectron2_available():
return unittest.skip("test requires `detectron2`")(test_case)
else:
return test_case
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
if not is_faiss_available():
return unittest.skip("test requires `faiss`")(test_case)
else:
return test_case
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
if not is_optuna_available():
return unittest.skip("test requires optuna")(test_case)
else:
return test_case
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
if not is_ray_available():
return unittest.skip("test requires Ray/tune")(test_case)
else:
return test_case
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
if not is_sigopt_available():
return unittest.skip("test requires SigOpt")(test_case)
else:
return test_case
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
if not is_soundfile_availble():
return unittest.skip("test requires soundfile")(test_case)
else:
return test_case
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
if not is_deepspeed_available():
return unittest.skip("test requires deepspeed")(test_case)
else:
return test_case
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
if not is_phonemizer_available():
return unittest.skip("test requires phonemizer")(test_case)
else:
return test_case
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
if not is_pyctcdecode_available():
return unittest.skip("test requires pyctcdecode")(test_case)
else:
return test_case
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
if not is_librosa_available():
return unittest.skip("test requires librosa")(test_case)
else:
return test_case
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
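# Example (illustrative sketch, hypothetical fixture path): called from a test module, this
# resolves a fixture relative to the `tests` directory regardless of where pytest was invoked from.
# SAMPLE_TEXT = get_tests_dir("fixtures/sample_text.txt")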
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
    assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
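# Example (illustrative sketch): progress-bar style output rewritten in place via carriage
# returns collapses to its final state, which is what the helpers above compare against.
def _example_apply_print_resets():
    buf = "downloading 10%\rdownloading 55%\rfinal message"
    assert apply_print_resets(buf) == "final message"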
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
    doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
    >>> assert cl.out == msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
    This is a context manager to temporarily change the logging level of the transformers modules to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
    Temporarily add the given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
    This is useful for debugging when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
            If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
            `tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
            If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
            intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
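# Example (illustrative sketch, hypothetical test case): a unique temporary directory is created
# per call and removed automatically in tearDown; `get_env()` returns a PYTHONPATH suitable for
# launching external scripts from the test.
# class ExampleSaveLoadTest(TestCasePlus):
#     def test_save_roundtrip(self):
#         tmp_dir = self.get_auto_remove_tmp_dir()
#         ...  # write to tmp_dir, reload, and compare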
def mockenv(**kwargs):
"""
    this is a convenience wrapper that allows this::
        @mockenv(RUN_SLOW=True, USE_TF=False)
        def test_something():
            run_slow = os.getenv("RUN_SLOW", False)
            use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
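# Example (illustrative sketch, hypothetical variable names): variables passed positionally are
# removed, keyword arguments are set, and the original environment is restored on exit even if
# the body raises.
def _example_mockenv_context_usage():
    with mockenv_context("UNWANTED_VAR", WANTED_VAR="1"):
        assert os.environ.get("WANTED_VAR") == "1"
        assert "UNWANTED_VAR" not in os.environ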
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
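# Example (illustrative sketch of the conftest.py side): the wrapper each conftest is expected
# to define so that both the `tests` and `examples` suites can register the shared option.
# def pytest_addoption(parser):
#     pytest_addoption_shared(parser)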
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
    This function emulates the --durations and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
    NB: this function taps into a private _pytest API and, while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = "reports"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{id}_{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
with open(report_files["passes"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
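# Example (illustrative sketch of the conftest.py side; the exact hook body may differ): the
# option value registered above is forwarded as the report id.
# def pytest_terminal_summary(terminalreporter):
#     make_reports = terminalreporter.config.getoption("--make-reports")
#     if make_reports:
#         pytest_terminal_summary_main(terminalreporter, id=make_reports)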
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
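# Example (illustrative sketch, hypothetical helper): running a short Python one-liner the same
# way a distributed test would run its launcher script, then asserting on the captured output;
# a non-zero return code or empty output raises RuntimeError as implemented above.
def _example_run_external_script(env=None):
    cmd = [sys.executable, "-c", "print('hello from a subprocess')"]
    result = execute_subprocess_async(cmd, env=env)
    assert "hello from a subprocess" in "\n".join(result.stdout)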
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
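# Example (illustrative sketch, hypothetical script name): building a `torch.distributed.launch`
# command whose rendezvous port will not clash across concurrent `pytest-xdist` workers.
def _example_distributed_launch_cmd(num_gpus):
    return [
        sys.executable,
        "-m",
        "torch.distributed.launch",
        f"--nproc_per_node={num_gpus}",
        f"--master_port={get_torch_dist_unique_port()}",
        "run_example_training.py",
    ]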
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
from transformers.tokenization_utils import BatchEncoding
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, (dict, BatchEncoding)):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
    PT+TF tests are skipped by default and we can run only them by setting the RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and Flax.
    PT+FLAX tests are skipped by default and we can run only them by setting the RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
    Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
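# Example (illustrative sketch, hypothetical test and version floor): applied directly, the
# decorator uses ACCELERATE_MIN_VERSION; a stricter floor can be requested with
# `functools.partial`.
@functools.partial(require_accelerate, min_version="0.26.0")
def _example_accelerate_dependent_test():
    pass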
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
    Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
    Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
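# Example (illustrative sketch, hypothetical test and repo id): decorators compose, so a test
# that loads a gated checkpoint can combine the token patch with other requirements.
# @require_read_token
# @require_torch
# def test_load_gated_checkpoint(self):
#     model = AutoModel.from_pretrained("org/gated-model")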
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
    These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one or both are not installed.
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when they aren't
installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
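# Illustrative usage (a minimal sketch, assuming a test that needs at least two CUDA devices):
#
#     @require_torch_multi_gpu
#     def test_model_parallelism(self):
#         ...
#
# and, as noted in the docstring above, only these tests can be selected with:
#     pytest -sv ./tests -k "multi_gpu"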
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
test_case
)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(
torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(
torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires Cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if it is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
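# Example usage of `get_tests_dir` (a sketch; the fixture path is hypothetical):
#
#     data_file = get_tests_dir("fixtures/sample.json")
#     # -> "<repo>/tests/fixtures/sample.json", regardless of where pytest was invoked from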
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
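# Worked example (sketch): carriage-return "progress" output collapses to the final message,
# so the buffer looks the same with and without pytest -s.
#
#     apply_print_resets("step 1\rstep 2\rdone")        # -> "done"
#     assert_screenout("Downloading...\rDone", "done")  # passes (comparison is lowercased)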
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporarily add the given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests, we often need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows this::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
    run_slow = os.getenv("RUN_SLOW", False)
    use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this functions taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
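# Worked example (sketch): under `pytest -n 4`, worker "gw2" has PYTEST_XDIST_WORKER=gw2, so
# pytest_xdist_worker_id() returns 2 and get_torch_dist_unique_port() returns 29500 + 2 = 29502,
# keeping concurrent distributed tests on distinct master ports.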
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key on its own line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
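# Illustrative usage (sketch): run a short command and capture its stdout; a non-zero exit
# raises SubprocessCallException carrying the captured output.
#
#     out = run_command([sys.executable, "-c", "print('ok')"], return_stdout=True)
#     # out == "ok\n"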
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
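# Illustrative usage (sketch): retry a known-flaky test up to 3 times, waiting one second
# between attempts; `description` is purely informational.
#
#     @is_flaky(max_attempts=3, wait_before_retry=1.0, description="flaky on CI")
#     def test_sometimes_times_out(self):
#         ...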
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_method = "spawn"
ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
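# Sketch of the `target_func` contract inferred from the code above (not an official API):
# it receives the input queue, the output queue and the timeout, and must put a dict with an
# "error" key (None on success) on the output queue.
#
#     def _target(in_queue, out_queue, timeout):
#         error = None
#         try:
#             inputs = in_queue.get(timeout=timeout)
#             ...  # actual assertions / computation on `inputs`
#         except Exception as e:
#             error = str(e)
#         out_queue.put({"error": error}, timeout=timeout)
#         out_queue.join()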
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`.
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstring. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA usage is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# here instead of at the user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
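# Illustrative usage (sketch) of the device-agnostic helpers built on the dispatch tables above:
#
#     backend_manual_seed(torch_device, 0)    # torch.cuda.manual_seed(0) on "cuda", torch.manual_seed(0) otherwise
#     backend_empty_cache(torch_device)       # no-op on "cpu"
#     n = backend_device_count(torch_device)  # e.g. torch.cuda.device_count() on "cuda"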
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
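# Usage sketch (illustrative only; the variable names below are made up): yes/no style flags are
# read through this helper, falling back to `default` when the variable is unset:
#
#     os.environ["MY_FAKE_FLAG"] = "yes"
#     assert parse_flag_from_env("MY_FAKE_FLAG", default=False)
#     assert not parse_flag_from_env("SOME_UNSET_FLAG", default=False)   # unset -> default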
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env("RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
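# Illustrative sketch (hypothetical test, not part of this module): a test is opted out by
# default with `@slow` and opted back in by exporting the environment variable checked above:
#
#     class ExampleModelTests(unittest.TestCase):
#         @slow
#         def test_full_pretrained_checkpoint(self):
#             ...
#
#     # RUN_SLOW=1 python -m pytest tests/ -k test_full_pretrained_checkpoint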
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
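# Usage sketch (hypothetical test and repo id): `require_read_token` patches the hub token lookup
# with HF_HUB_READ_TOKEN when that variable is set, and otherwise runs the test with the ambient
# credentials, so gated checkpoints can be loaded inside the wrapped test:
#
#     @require_read_token
#     def test_gated_checkpoint(self):
#         model = AutoModel.from_pretrained("some-org/some-gated-model")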
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when they aren't
installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
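# Illustrative sketch (hypothetical test): multi-GPU tests are usually both decorated and named so
# they can be selected with `-k`, as the docstring above suggests:
#
#     @require_torch_multi_gpu
#     def test_model_parallel_multi_gpu(self):
#         ...
#
#     # pytest -sv ./tests -k "multi_gpu"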
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
test_case
)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
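# Usage sketch (illustrative; the fixture file name is made up): fixture paths can be resolved
# relative to the `tests` directory from any test module:
#
#     SAMPLE_WAV = get_tests_dir("fixtures/some_sample.wav")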
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporarily add the given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
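# Usage sketch (illustrative): the resolved environment is meant to be passed to child
# processes spawned from a test, e.g.:
#
#     env = self.get_env()
#     cmd = [sys.executable, "-c", "import transformers"]
#     execute_subprocess_async(cmd, env=env)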
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError("/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
This is a convenience wrapper that allows this::

    @mockenv(RUN_SLOW=True, USE_TF=False)
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
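# Usage sketch (illustrative; the variable names are arbitrary): positional arguments are removed
# and keyword arguments are set for the duration of the block, with the previous values restored
# on exit:
#
#     with mockenv_context("TOKENIZERS_PARALLELISM", WORLD_SIZE="1"):
#         ...  # sees WORLD_SIZE=1 and no TOKENIZERS_PARALLELISM
#     # original environment restored here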
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
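# Usage sketch: this helper is typically wired up from a `conftest.py` along these lines (a
# minimal sketch, assuming the standard pytest hook):
#
#     # conftest.py
#     from transformers.testing_utils import pytest_addoption_shared
#
#     def pytest_addoption(parser):
#         pytest_addoption_shared(parser)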
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates the --durations and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
    # Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it times out on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
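# Illustrative sketch of the matching `conftest.py` hook (assumes the `--make-reports` option
# registered by `pytest_addoption_shared`): the prefix given on the command line becomes `id`.
#
#     def pytest_terminal_summary(terminalreporter):
#         make_reports = terminalreporter.config.getoption("--make-reports")
#         if make_reports:
#             pytest_terminal_summary_main(terminalreporter, id=make_reports)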
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
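# Usage sketch (hypothetical script name, for illustration only): typically called from a
# `TestCasePlus` test, with the env returned by `self.get_env()` so `PYTHONPATH` is set up.
#
#     tmp_dir = self.get_auto_remove_tmp_dir()
#     env = self.get_env()
#     cmd = ["python", "some_training_script.py", "--output_dir", tmp_dir]
#     execute_subprocess_async(cmd, env=env)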
def pytest_xdist_worker_id():
"""
    Returns the numerical id of the current worker under `pytest-xdist`'s concurrent workers regime (`pytest -n N`),
    or 0 if `-n 1` is used or `pytest-xdist` isn't being used at all.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
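# Usage sketch (the launcher invocation is an assumption, shown for illustration only): feed the
# unique port to the launcher so concurrent `pytest-xdist` workers don't collide on the same port.
#
#     master_port = get_torch_dist_unique_port()
#     launcher = f"python -m torch.distributed.launch --nproc_per_node=2 --master_port={master_port}"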
def nested_simplify(obj, decimals=3):
"""
    Simplifies an object by rounding float numbers and downcasting tensors/numpy arrays, so that simple equality
    tests can be used within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
            # each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
            for line in lines[1:-1]:
                left_indent = len(line) - len(line.lstrip())
                assert left_indent == 2
assert lines[-1].strip() == "}"
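# For reference, a non-empty file accepted by the check above looks like this (one key per
# line, two-space indent):
#
#     {
#       "key": "value"
#     }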
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
    Runs `command` with `subprocess.check_output` and optionally returns the `stdout`. Also properly captures
    any error that occurred while running `command`.
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
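# Usage sketch (hypothetical command, for illustration only):
#
#     stdout = run_command(["python", "-c", "print('hello')"], return_stdout=True)
#     assert "hello" in stdout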
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
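# Usage sketch (hypothetical test, for illustration only): retry a known-flaky test up to 3
# times, waiting one second between attempts.
#
#     @is_flaky(max_attempts=3, wait_before_retry=1.0, description="see the tracking issue")
#     def test_sometimes_fails(self):
#         ...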
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
    To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
    start_method = "spawn"
    ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
    # the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
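# Sketch of the `target_func` protocol assumed above (illustrative, names are hypothetical):
# read `inputs` from the input queue, run the actual checks, and always report the outcome
# through the output queue so the parent process can unblock.
#
#     import traceback
#
#     def _target(in_queue, out_queue, timeout):
#         error = None
#         try:
#             inputs = in_queue.get(timeout=timeout)
#             # ... the actual (GPU) assertions on `inputs` go here ...
#         except Exception:
#             error = traceback.format_exc()
#         out_queue.put({"error": error}, timeout=timeout)
#         out_queue.join()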
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as the printed output would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
    its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
    CUDA usage is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
    Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
    Overwrites the `DoctestModule` of the pytest package to make sure the `HfDocTestParser` is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
    # Some device-agnostic functions return values, while backends that don't support an
    # operation map it to `None`; guard against `None` here rather than at the user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
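# Usage sketch: tests call these helpers with the globally resolved `torch_device`, e.g.
#
#     backend_manual_seed(torch_device, 0)
#     backend_empty_cache(torch_device)
#     n_devices = backend_device_count(torch_device)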
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError("Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
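# Illustrative sketch (assumptions only; `mybackend` is a hypothetical torch backend) of what a
# spec file pointed to by `TRANSFORMERS_TEST_DEVICE_SPEC` could contain:
#
#     import mybackend  # hypothetical module that registers the backend with torch
#
#     DEVICE_NAME = "mybackend"
#     MANUAL_SEED_FN = mybackend.manual_seed
#     EMPTY_CACHE_FN = mybackend.empty_cache
#     DEVICE_COUNT_FN = mybackend.device_count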
| 2024-09-07 07:07:51.173273: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-09-07 07:07:51.187509: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
2024-09-07 07:07:51.203442: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
2024-09-07 07:07:51.208236: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-09-07 07:07:51.220850: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-09-07 07:07:52.252890: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
ImportError while loading conftest '/workspace/test_repo/conftest.py'.
conftest.py:26: in <module>
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
src/transformers/testing_utils.py:54: in <module>
from .utils import (
E ImportError: cannot import name 'GGUF_MIN_VERSION' from 'transformers.utils' (/workspace/test_repo/src/transformers/utils/__init__.py)
| {'total': 17, 'passed': 6, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 3, 'warning': 8, 'error': 0} | {'total': 0, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | ./test_repo/src/transformers/testing_utils.py | ./test_repo/examples/tensorflow/test_tensorflow_examples.py |
2_transformers_callee__testing_utils__require_onnx__c8d3fa0dfd191c0272f8de5027430e2fc789b22c | callee | fail-to-pass | https://github.com/huggingface/transformers | c8d3fa0dfd191c0272f8de5027430e2fc789b22c | function | require_onnx | testing_utils.py | def require_onnx(test_case):
if not is_onnx_available():
return unittest.skip("test requires ONNX")(test_case)
else:
return test_case
| def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
import os
import re
import shutil
import sys
import tempfile
import unittest
from distutils.util import strtobool
from io import StringIO
from pathlib import Path
from .file_utils import (
is_datasets_available,
is_faiss_available,
is_flax_available,
is_onnx_available,
is_pandas_available,
is_scatter_available,
is_sentencepiece_available,
is_soundfile_availble,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_torch_tpu_available,
)
from .integrations import is_optuna_available, is_ray_available
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError("If set, {} must be yes or no.".format(key))
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError("If set, {} must be a int.".format(key))
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False)
_run_git_lfs_tests = parse_flag_from_env("RUN_GIT_LFS_TESTS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test.
Pipeline tests are skipped by default and we can run only them by setting RUN_PIPELINE_TESTS environment variable
to a truthy value and selecting the is_pipeline_test pytest mark.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
if not _run_slow_tests:
return unittest.skip("test is slow")(test_case)
else:
return test_case
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
if not _run_custom_tokenizers:
return unittest.skip("test of custom tokenizers")(test_case)
else:
return test_case
def require_git_lfs(test_case):
"""
Decorator marking a test that requires git-lfs.
git-lfs requires additional dependencies, and tests are skipped by default. Set the RUN_GIT_LFS_TESTS environment
variable to a truthy value to run them.
"""
if not _run_git_lfs_tests:
return unittest.skip("test of git lfs workflow")(test_case)
else:
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
else:
return test_case
def require_torch_scatter(test_case):
"""
Decorator marking a test that requires PyTorch scatter.
These tests are skipped when PyTorch scatter isn't installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch scatter")(test_case)
else:
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow.
These tests are skipped when TensorFlow isn't installed.
"""
if not is_tf_available():
return unittest.skip("test requires TensorFlow")(test_case)
else:
return test_case
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax
These tests are skipped when one / both are not installed
"""
if not is_flax_available():
test_case = unittest.skip("test requires JAX & Flax")(test_case)
return test_case
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece.
These tests are skipped when SentencePiece isn't installed.
"""
if not is_sentencepiece_available():
return unittest.skip("test requires SentencePiece")(test_case)
else:
return test_case
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers.
These tests are skipped when 🤗 Tokenizers isn't installed.
"""
if not is_tokenizers_available():
return unittest.skip("test requires tokenizers")(test_case)
else:
return test_case
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
if not is_pandas_available():
return unittest.skip("test requires pandas")(test_case)
else:
return test_case
def require_scatter(test_case):
"""
Decorator marking a test that requires PyTorch Scatter. These tests are skipped when PyTorch Scatter isn't
installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch Scatter")(test_case)
else:
return test_case
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch).
These tests are skipped on a machine without multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
else:
return test_case
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() > 1:
return unittest.skip("test requires 0 or 1 GPU")(test_case)
else:
return test_case
# this is a decorator identical to require_torch_non_multi_gpu, but is used as a quick band-aid to
# allow all of examples to be run multi-gpu CI and it reminds us that tests decorated with this one
# need to be ported and aren't so by design.
require_torch_non_multi_gpu_but_fix_me = require_torch_non_multi_gpu
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
if not is_torch_tpu_available():
return unittest.skip("test requires PyTorch TPU")
else:
return test_case
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
else:
torch_device = None
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch. """
if torch_device != "cuda":
return unittest.skip("test requires CUDA")(test_case)
else:
return test_case
def require_datasets(test_case):
"""Decorator marking a test that requires datasets."""
if not is_datasets_available():
return unittest.skip("test requires `datasets`")(test_case)
else:
return test_case
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
if not is_faiss_available():
return unittest.skip("test requires `faiss`")(test_case)
else:
return test_case
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
if not is_optuna_available():
return unittest.skip("test requires optuna")(test_case)
else:
return test_case
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
if not is_ray_available():
return unittest.skip("test requires Ray/tune")(test_case)
else:
return test_case
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
if not is_soundfile_availble():
return unittest.skip("test requires soundfile")(test_case)
else:
return test_case
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch or tf is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
    stdout, clean it up and make it available via obj.out
    stderr, and make it available via obj.err
    init arguments:
    - out - capture stdout: True/False, default True
    - err - capture stderr: True/False, default True
Examples::
with CaptureStdout() as cs:
print("Secret message")
print(f"captured: {cs.out}")
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
print(f"captured: {cs.err}")
# to capture just one of the streams, but not the other
with CaptureStd(err=False) as cs:
print("Secret message")
print(f"captured: {cs.out}")
# but best use the stream-specific subclasses
"""
def __init__(self, out=True, err=True):
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
self.out = apply_print_resets(self.out_buf.getvalue())
if self.err_buf:
sys.stderr = self.err_old
self.err = self.err_buf.getvalue()
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
""" Same as CaptureStd but captures only stdout """
def __init__(self):
super().__init__(err=False)
class CaptureStderr(CaptureStd):
""" Same as CaptureStd but captures only stderr """
def __init__(self):
super().__init__(out=False)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
- logger: 'logging` logger object
Results:
The captured output is available via `self.out`
Example::
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg+"\n"
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
class TestCasePlus(unittest.TestCase):
"""
This class extends `unittest.TestCase` with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
* ``pathlib`` objects (all fully resolved):
- ``test_file_path`` - the current test file path (=``__file__``)
- ``test_file_dir`` - the directory containing the current test file
- ``tests_dir`` - the directory of the ``tests`` test suite
- ``examples_dir`` - the directory of the ``examples`` test suite
- ``repo_root_dir`` - the directory of the repository
- ``src_dir`` - the directory of ``src`` (i.e. where the ``transformers`` sub-dir resides)
* stringified paths---same as above but these return paths as strings, rather than ``pathlib`` objects:
- ``test_file_path_str``
- ``test_file_dir_str``
- ``tests_dir_str``
- ``examples_dir_str``
- ``repo_root_dir_str``
- ``src_dir_str``
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
::
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
``tmp_dir`` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
::
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the ``before`` and ``after`` args, leading to the
following behavior:
``before=True``: the temporary dir will always be cleared at the beginning of the test.
``before=False``: if the temporary dir already existed, any existing files will remain there.
``after=True``: the temporary dir will always be deleted at the end of the test.
``after=False``: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of ``rm -r`` safely, only subdirs of the project repository checkout are
allowed if an explicit ``tmp_dir`` is used, so that by mistake no ``/tmp`` or similar important part of the
filesystem will get nuked. i.e. please always pass paths that start with ``./``
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` specific to the current test suite.
This is useful for invoking external programs from the test suite - e.g. distributed training.
::
def test_whatever(self):
env = self.get_env()
"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` correctly, depending on the test suite
it's invoked from. This is useful for invoking external programs from the test suite - e.g. distributed
training.
It always inserts ``./src`` first, then ``./tests`` or ``./examples`` depending on the test suite type and
finally the preset ``PYTHONPATH`` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (:obj:`string`, `optional`):
if :obj:`None`:
- a unique temporary path will be created
- sets ``before=True`` if ``before`` is :obj:`None`
- sets ``after=True`` if ``after`` is :obj:`None`
else:
- :obj:`tmp_dir` will be created
- sets ``before=True`` if ``before`` is :obj:`None`
- sets ``after=False`` if ``after`` is :obj:`None`
before (:obj:`bool`, `optional`):
                If :obj:`True` and the :obj:`tmp_dir` already exists, make sure to empty it right away; if :obj:`False`
                and the :obj:`tmp_dir` already exists, any existing files will remain there.
after (:obj:`bool`, `optional`):
                If :obj:`True`, delete the :obj:`tmp_dir` at the end of the test; if :obj:`False`, leave the
:obj:`tmp_dir` and its contents intact at the end of the test.
Returns:
tmp_dir(:obj:`string`): either the same value as passed via `tmp_dir` or the path to the auto-selected tmp
dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
def mockenv(**kwargs):
"""
this is a convenience wrapper, that allows this:
    @mockenv(RUN_SLOW=True, USE_TF=False)
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)
"""
return unittest.mock.patch.dict(os.environ, kwargs)
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
    NB: this function taps into a private _pytest API and, while unlikely, it could break should
pytest do internal changes - also it calls default internal methods of terminalreporter which
can be hijacked by various `pytest-` plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = "reports"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{id}_{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
with open(report_files["passes"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
import os
import re
import shutil
import sys
import tempfile
import unittest
from distutils.util import strtobool
from io import StringIO
from pathlib import Path
from .file_utils import (
is_datasets_available,
is_faiss_available,
is_flax_available,
is_onnx_available,
is_pandas_available,
is_scatter_available,
is_sentencepiece_available,
is_soundfile_availble,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_torch_tpu_available,
)
from .integrations import is_optuna_available, is_ray_available
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError("If set, {} must be yes or no.".format(key))
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError("If set, {} must be a int.".format(key))
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False)
_run_git_lfs_tests = parse_flag_from_env("RUN_GIT_LFS_TESTS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test.
Pipeline tests are skipped by default and we can run only them by setting RUN_PIPELINE_TESTS environment variable
to a truthy value and selecting the is_pipeline_test pytest mark.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
if not _run_slow_tests:
return unittest.skip("test is slow")(test_case)
else:
return test_case
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
if not _run_custom_tokenizers:
return unittest.skip("test of custom tokenizers")(test_case)
else:
return test_case
def require_git_lfs(test_case):
"""
Decorator marking a test that requires git-lfs.
git-lfs requires additional dependencies, and tests are skipped by default. Set the RUN_GIT_LFS_TESTS environment
variable to a truthy value to run them.
"""
if not _run_git_lfs_tests:
return unittest.skip("test of git lfs workflow")(test_case)
else:
return test_case
def require_onnx(test_case):
if not is_onnx_available():
return unittest.skip("test requires ONNX")(test_case)
else:
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
else:
return test_case
def require_torch_scatter(test_case):
"""
Decorator marking a test that requires PyTorch scatter.
These tests are skipped when PyTorch scatter isn't installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch scatter")(test_case)
else:
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow.
These tests are skipped when TensorFlow isn't installed.
"""
if not is_tf_available():
return unittest.skip("test requires TensorFlow")(test_case)
else:
return test_case
def require_flax(test_case):
"""
    Decorator marking a test that requires JAX & Flax.
    These tests are skipped when one or both are not installed.
"""
if not is_flax_available():
test_case = unittest.skip("test requires JAX & Flax")(test_case)
return test_case
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece.
These tests are skipped when SentencePiece isn't installed.
"""
if not is_sentencepiece_available():
return unittest.skip("test requires SentencePiece")(test_case)
else:
return test_case
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers.
These tests are skipped when 🤗 Tokenizers isn't installed.
"""
if not is_tokenizers_available():
return unittest.skip("test requires tokenizers")(test_case)
else:
return test_case
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
if not is_pandas_available():
return unittest.skip("test requires pandas")(test_case)
else:
return test_case
def require_scatter(test_case):
"""
Decorator marking a test that requires PyTorch Scatter. These tests are skipped when PyTorch Scatter isn't
installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch Scatter")(test_case)
else:
return test_case
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch).
These tests are skipped on a machine without multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
else:
return test_case
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() > 1:
return unittest.skip("test requires 0 or 1 GPU")(test_case)
else:
return test_case
# this is a decorator identical to require_torch_non_multi_gpu, but is used as a quick band-aid to
# allow all of the examples to be run on multi-gpu CI, and it reminds us that tests decorated with
# this one need to be ported and aren't so by design.
require_torch_non_multi_gpu_but_fix_me = require_torch_non_multi_gpu
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
if not is_torch_tpu_available():
return unittest.skip("test requires PyTorch TPU")
else:
return test_case
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
else:
torch_device = None
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch. """
if torch_device != "cuda":
return unittest.skip("test requires CUDA")(test_case)
else:
return test_case
def require_datasets(test_case):
"""Decorator marking a test that requires datasets."""
if not is_datasets_available():
return unittest.skip("test requires `datasets`")(test_case)
else:
return test_case
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
if not is_faiss_available():
return unittest.skip("test requires `faiss`")(test_case)
else:
return test_case
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
if not is_optuna_available():
return unittest.skip("test requires optuna")(test_case)
else:
return test_case
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
if not is_ray_available():
return unittest.skip("test requires Ray/tune")(test_case)
else:
return test_case
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
if not is_soundfile_availble():
return unittest.skip("test requires soundfile")(test_case)
else:
return test_case
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch or tf is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
        The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
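# Illustrative: for a test module located directly under tests/, get_tests_dir("fixtures") resolves
# to <repo>/tests/fixtures regardless of the directory pytest was invoked from.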
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
    stdout (cleaned up and made available via obj.out) and stderr (made available via obj.err).
    init arguments: - out - capture stdout: True/False, default True - err - capture stderr: True/False, default
True
Examples::
with CaptureStdout() as cs:
print("Secret message")
print(f"captured: {cs.out}")
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
print(f"captured: {cs.err}")
# to capture just one of the streams, but not the other
with CaptureStd(err=False) as cs:
print("Secret message")
print(f"captured: {cs.out}")
# but best use the stream-specific subclasses
"""
def __init__(self, out=True, err=True):
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
self.out = apply_print_resets(self.out_buf.getvalue())
if self.err_buf:
sys.stderr = self.err_old
self.err = self.err_buf.getvalue()
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
""" Same as CaptureStd but captures only stdout """
def __init__(self):
super().__init__(err=False)
class CaptureStderr(CaptureStd):
""" Same as CaptureStd but captures only stderr """
def __init__(self):
super().__init__(out=False)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
        - logger: `logging` logger object
Results:
The captured output is available via `self.out`
Example::
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
    >>> assert cl.out == msg + "\n"
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
class TestCasePlus(unittest.TestCase):
"""
This class extends `unittest.TestCase` with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
* ``pathlib`` objects (all fully resolved):
- ``test_file_path`` - the current test file path (=``__file__``)
- ``test_file_dir`` - the directory containing the current test file
- ``tests_dir`` - the directory of the ``tests`` test suite
- ``examples_dir`` - the directory of the ``examples`` test suite
- ``repo_root_dir`` - the directory of the repository
- ``src_dir`` - the directory of ``src`` (i.e. where the ``transformers`` sub-dir resides)
* stringified paths---same as above but these return paths as strings, rather than ``pathlib`` objects:
- ``test_file_path_str``
- ``test_file_dir_str``
- ``tests_dir_str``
- ``examples_dir_str``
- ``repo_root_dir_str``
- ``src_dir_str``
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
::
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
``tmp_dir`` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
::
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the ``before`` and ``after`` args, leading to the
following behavior:
``before=True``: the temporary dir will always be cleared at the beginning of the test.
``before=False``: if the temporary dir already existed, any existing files will remain there.
``after=True``: the temporary dir will always be deleted at the end of the test.
``after=False``: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of ``rm -r`` safely, only subdirs of the project repository checkout are
allowed if an explicit ``tmp_dir`` is used, so that by mistake no ``/tmp`` or similar important part of the
filesystem will get nuked. i.e. please always pass paths that start with ``./``
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` specific to the current test suite.
This is useful for invoking external programs from the test suite - e.g. distributed training.
::
def test_whatever(self):
env = self.get_env()
"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` correctly, depending on the test suite
it's invoked from. This is useful for invoking external programs from the test suite - e.g. distributed
training.
It always inserts ``./src`` first, then ``./tests`` or ``./examples`` depending on the test suite type and
finally the preset ``PYTHONPATH`` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
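    # Illustrative result (paths are placeholders): for a test file under examples/, the returned
    # env["PYTHONPATH"] looks like "<repo>/src:<repo>/examples:<previously set PYTHONPATH>".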
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (:obj:`string`, `optional`):
if :obj:`None`:
- a unique temporary path will be created
- sets ``before=True`` if ``before`` is :obj:`None`
- sets ``after=True`` if ``after`` is :obj:`None`
else:
- :obj:`tmp_dir` will be created
- sets ``before=True`` if ``before`` is :obj:`None`
- sets ``after=False`` if ``after`` is :obj:`None`
before (:obj:`bool`, `optional`):
                If :obj:`True` and the :obj:`tmp_dir` already exists, make sure to empty it right away; if :obj:`False`
                and the :obj:`tmp_dir` already exists, any existing files will remain there.
after (:obj:`bool`, `optional`):
                If :obj:`True`, delete the :obj:`tmp_dir` at the end of the test; if :obj:`False`, leave the
:obj:`tmp_dir` and its contents intact at the end of the test.
Returns:
tmp_dir(:obj:`string`): either the same value as passed via `tmp_dir` or the path to the auto-selected tmp
dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
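# Usage sketch (hypothetical test case, illustration only): exercises the TestCasePlus features
# documented above, i.e. auto-removed tmp dirs and the PYTHONPATH-aware environment copy.
class _DemoTestCasePlus(TestCasePlus):
    def test_tmp_dir_and_env(self):
        tmp_dir = self.get_auto_remove_tmp_dir()  # unique dir, removed automatically in tearDown()
        with open(os.path.join(tmp_dir, "scratch.txt"), "w") as f:
            f.write("scratch data")
        env = self.get_env()  # os.environ copy with ./src and ./tests (or ./examples) prepended
        self.assertIn("PYTHONPATH", env)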
def mockenv(**kwargs):
"""
    this is a convenience wrapper that allows writing:
    @mockenv(RUN_SLOW=True, USE_TF=False)
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)
"""
return unittest.mock.patch.dict(os.environ, kwargs)
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
    This function emulates the --durations and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
    NB: this function taps into a private _pytest API and, while unlikely, it could break should
pytest do internal changes - also it calls default internal methods of terminalreporter which
can be hijacked by various `pytest-` plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = "reports"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{id}_{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
with open(report_files["passes"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
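# Sketch of the conftest.py glue the two helpers above expect (an assumption about how the hooks are
# wired; the repository's actual conftest.py may differ in detail):
#   def pytest_addoption(parser):
#       pytest_addoption_shared(parser)
#   def pytest_terminal_summary(terminalreporter):
#       make_reports = terminalreporter.config.getoption("--make-reports")
#       if make_reports:
#           pytest_terminal_summary_main(terminalreporter, id=make_reports)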
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
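# Usage sketch (hypothetical command, illustration only): how a distributed-training test typically
# shells out through the helper above from inside a TestCasePlus test method:
#   env = self.get_env()  # PYTHONPATH prepared for the child process
#   cmd = ["python", "-m", "torch.distributed.launch", "--nproc_per_node=2", "train_demo.py"]
#   result = execute_subprocess_async(cmd, env=env)
#   assert result.returncode == 0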
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
    PT+TF tests are skipped by default; they can be run on their own by setting the RUN_PT_TF_CROSS_TESTS environment
    variable to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and Flax.
    PT+FLAX tests are skipped by default; they can be run on their own by setting the RUN_PT_FLAX_CROSS_TESTS
    environment variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
    Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
    Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
    Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
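# Usage sketch (hypothetical test and model id, illustration only): stacking the decorator above on
# a test that downloads a gated checkpoint lets CI authenticate via HF_HUB_READ_TOKEN:
#   @require_read_token
#   def test_gated_model_loads(self):
#       model = AutoModel.from_pretrained("some-org/gated-model")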
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
    These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
    Decorator marking a test that requires JAX & Flax. These tests are skipped when one or both are not installed.
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
    Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
    Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
    Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when they aren't
installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
    return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
        test_case
    )
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
    must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
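# Illustrative environment overrides honored by the device-selection block above (values are
# examples, not defaults):
#   TRANSFORMERS_TEST_DEVICE=cpu pytest tests/                        # force CPU even if CUDA exists
#   TRANSFORMERS_TEST_BACKEND=torch_npu TRANSFORMERS_TEST_DEVICE=npu pytest tests/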
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(
torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(
torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
    return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
    Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
        The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
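# Illustrative sketch (not part of the original module): a test file typically uses `get_tests_dir`
# to locate fixture data relative to the test suite; the "fixtures" sub-path and file name below are
# hypothetical.
#
#     data_dir = get_tests_dir("fixtures")
#     config_path = os.path.join(data_dir, "dummy_config.json")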
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
    assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
    doing. If this is not the desired behavior and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
    >>> assert cl.out == msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
    This is useful for debugging when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
                If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
                `tmp_dir` already exists, any existing files will remain there.
            after (`bool`, *optional*):
                If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its
                contents intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
    this is a convenience wrapper that allows this::

        @mockenv(RUN_SLOW=True, USE_TF=False)
        def test_something():
            run_slow = os.getenv("RUN_SLOW", False)
            use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
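# Usage sketch (illustrative; the variable names are hypothetical): positional arguments are
# removed from `os.environ`, keyword arguments are set, and both are restored on exit.
#
#     with mockenv_context("VAR_TO_REMOVE", RUN_SLOW="1"):
#         ...  # os.environ reflects the changes only inside this block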
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
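# Illustrative sketch of the `conftest.py` wrapper referred to in the docstring above (the exact
# wiring in the real tests/ and examples/ conftest files may differ):
#
#     # conftest.py
#     from transformers.testing_utils import pytest_addoption_shared
#
#     def pytest_addoption(parser):
#         pytest_addoption_shared(parser)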
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
    NB: this function taps into a private _pytest API and, while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
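# Usage sketch (illustrative; the script path and arguments are hypothetical): launch an external
# script from inside a `TestCasePlus` test, with the test suite's PYTHONPATH set up by `get_env()`.
#
#     cmd = ["python", "./examples/run_something.py", "--output_dir", tmp_dir]
#     execute_subprocess_async(cmd, env=self.get_env())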
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
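# Illustrative sketch (the launcher and script name are hypothetical): feed the unique port into a
# distributed launch command so concurrent pytest-xdist workers don't collide on the same port.
#
#     master_port = get_torch_dist_unique_port()
#     cmd = f"torchrun --nproc_per_node=2 --master_port={master_port} ./train_dummy.py".split()
#     execute_subprocess_async(cmd, env=self.get_env())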
def nested_simplify(obj, decimals=3):
"""
    Simplifies an object by rounding float numbers and downcasting tensors/numpy arrays so that simple equality
    tests can be used within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
            # each key on its own line, indent should be 2, min length is 3
            assert lines[0].strip() == "{"
            for line in lines[1:-1]:
                left_indent = len(line) - len(line.lstrip())
                assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
    if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
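# Usage sketch (illustrative; the command is arbitrary): run a command, optionally capture stdout,
# and get a `SubprocessCallException` carrying the captured output if it fails.
#
#     stdout = run_command(["python", "-c", "print('ok')"], return_stdout=True)
#     assert "ok" in stdout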
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
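# Usage sketch (illustrative; the test name is hypothetical): retry a non-deterministic test up to
# 3 times, waiting one second between attempts.
#
#     @is_flaky(max_attempts=3, wait_before_retry=1.0, description="occasionally fails due to sampling")
#     def test_generation_sampling(self):
#         ...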
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
    To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_methohd = "spawn"
ctx = multiprocessing.get_context(start_methohd)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
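# Illustrative sketch of the `target_func` contract implied by the code above (the body is
# hypothetical): it receives the input queue, the output queue and the timeout, and must put a
# dict with an "error" key (None on success) on the output queue.
#
#     def _target(in_queue, out_queue, timeout):
#         error = None
#         try:
#             inputs = in_queue.get(timeout=timeout)
#             ...  # actual testing logic using `inputs`
#         except Exception as e:
#             error = f"{e}"
#         out_queue.put({"error": error}, timeout=timeout)
#         out_queue.join()  # the parent calls `task_done()` on the JoinableQueue
#
#     run_test_in_subprocess(self, _target, inputs={"some_key": "some_value"})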
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
    its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
    cuda usage is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
    Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
    Overwrites the `DoctestModule` of the pytest package to make sure the HfDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# instead at user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
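# Usage sketch (illustrative): the device-agnostic wrappers dispatch on a device string such as the
# module-level `torch_device` used throughout the test suite.
#
#     backend_manual_seed(torch_device, 0)   # torch.manual_seed / torch.cuda.manual_seed / spec fn
#     backend_empty_cache(torch_device)      # no-op on "cpu" (mapped to None above)
#     n_devices = backend_device_count(torch_device)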
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env("RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
    Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
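# Usage sketch (illustrative; the test name is hypothetical): decorators from this module are
# stacked on individual tests, e.g. a slow test that also needs PyTorch installed.
#
#     @slow
#     @require_torch
#     def test_full_model_integration(self):
#         ...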
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
    Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
    Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
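# Illustrative sketch (assumption, not in the original file): `require_read_token` wraps the test function
# directly, so it composes with the skip decorators defined around it. A hypothetical gated-model test:
#
#     @require_read_token
#     @require_torch
#     def test_gated_model(self):
#         model = AutoModel.from_pretrained("org/gated-model")  # hypothetical checkpoint id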
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
test_case
)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires Cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
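# Illustrative sketch (not part of the original module): resolving a fixture path relative to the `tests`
# directory, regardless of where pytest was invoked from:
#
#     SAMPLE_FIXTURE = get_tests_dir("fixtures/sample.json")  # hypothetical fixture path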
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
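# Illustrative sketch (not part of the original module): `apply_print_resets` keeps only what would remain
# visible after carriage-return overwrites, which is what `assert_screenout` relies on:
#
#     buf = "downloading 10%\rdownloading 50%\rdone"
#     apply_print_resets(buf)        # -> "done"
#     assert_screenout(buf, "done")  # passes; would raise AssertionError if "done" were absent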
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this is a not wanted behavior and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out == msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError("/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
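# Illustrative sketch (not part of the original module): a hypothetical test built on TestCasePlus, using an
# auto-removed tmp dir and a PYTHONPATH-aware environment to launch a subprocess:
#
#     class ExampleTest(TestCasePlus):
#         def test_script(self):
#             tmp_dir = self.get_auto_remove_tmp_dir()  # removed automatically in tearDown
#             env = self.get_env()                      # PYTHONPATH includes ./src and ./tests (or ./examples)
#             execute_subprocess_async(["python", "-c", "print('ok')"], env=env)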
def mockenv(**kwargs):
"""
this is a convenience wrapper, that allows this ::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
    run_slow = os.getenv("RUN_SLOW", False)
    use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
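# Illustrative sketch (not part of the original module): temporarily removing and overriding environment
# variables only within a block (the variable names below are hypothetical):
#
#     with mockenv_context("USE_TF", HF_HOME="/tmp/hf-home"):
#         ...  # USE_TF is unset and HF_HOME is overridden here; both are restored on exit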
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this functions taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
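# Illustrative sketch (not part of the original module): under `pytest -n 2`, worker "gw1" resolves to id 1,
# so it gets master port 29500 + 1 and concurrent distributed tests don't collide:
#
#     master_port = get_torch_dist_unique_port()
#     cmd = ["torchrun", f"--master_port={master_port}", "some_test_script.py"]  # hypothetical script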
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# one key per line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
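# Illustrative sketch (not part of the original module):
#
#     to_2tuple(7)       # -> (7, 7)
#     to_2tuple((3, 5))  # -> (3, 5), iterables are returned unchanged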
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
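# Illustrative sketch (not part of the original module): running a command and capturing its stdout, with a
# readable SubprocessCallException if it fails:
#
#     stdout = run_command(["python", "-c", "print('hello')"], return_stdout=True)
#     assert "hello" in stdout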
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
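# Illustrative sketch (not part of the original module): retrying a known-flaky test before reporting failure:
#
#     @is_flaky(max_attempts=3, wait_before_retry=1.0, description="hypothetical race in a download test")
#     def test_sometimes_fails(self):
#         ...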
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_method = "spawn"
ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
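# Illustrative sketch (assumption, not in the original file): the `target_func` contract expected by
# `run_test_in_subprocess` - read the inputs from the input queue, put a dict with an "error" key on the
# output queue, then join it so the parent can finish:
#
#     def _target(in_queue, out_queue, timeout):
#         error = None
#         try:
#             inputs = in_queue.get(timeout=timeout)
#             ...  # run the actual checks on `inputs`
#         except Exception as e:
#             error = str(e)
#         out_queue.put({"error": error}, timeout=timeout)
#         out_queue.join()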
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA usage is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
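# Hedged sketch of how a project conftest.py might route collected files to HfDoctestModule
# (pytest_collect_file is standard pytest API; the wiring shown here is an assumption for illustration):
#
#     def pytest_collect_file(file_path, parent):
#         if file_path.suffix == ".py":
#             return HfDoctestModule.from_parent(parent, path=file_path)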
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
    # Some dispatch-table entries are `None` (a no-op for that backend); guard against calling them
    # here instead of forcing callers to handle it at the user level.
if fn is None:
return None
return fn(*args, **kwargs)
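# Minimal illustration of the dispatcher above (the table values are assumptions, not project code):
_example_dispatch_table = {"cpu": lambda: 0, "default": lambda: 1}
assert _device_agnostic_dispatch("cpu", _example_dispatch_table) == 0   # direct hit on a registered device
assert _device_agnostic_dispatch("xpu", _example_dispatch_table) == 1   # unknown device falls back to "default"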
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
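# Hedged usage sketch: tests are expected to call these thin wrappers rather than torch.cuda directly,
# so the same test body runs on cpu, cuda, or a third-party backend registered via the spec file handled
# below (torch_device is defined elsewhere in this module).
#
#     backend_manual_seed(torch_device, 0)
#     backend_empty_cache(torch_device)
#     num_devices = backend_device_count(torch_device)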
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError("Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
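# Hedged sketch of a minimal device spec file consumable via TRANSFORMERS_TEST_DEVICE_SPEC
# (the file name, backend name, and torch.npu hooks are hypothetical; given the defaults registered
# above, only DEVICE_NAME is strictly required):
#
#     # my_device_spec.py
#     import torch
#     DEVICE_NAME = "npu"
#     MANUAL_SEED_FN = torch.npu.manual_seed      # hypothetical hook provided by the backend package
#     EMPTY_CACHE_FN = torch.npu.empty_cache      # hypothetical hook
#     DEVICE_COUNT_FN = torch.npu.device_count    # hypothetical hook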
| 2024-09-07 07:11:45.448148: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-09-07 07:11:45.462371: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
2024-09-07 07:11:45.478313: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
2024-09-07 07:11:45.483106: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-09-07 07:11:45.495692: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-09-07 07:11:46.582420: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
ImportError while loading conftest '/workspace/test_repo/conftest.py'.
conftest.py:26: in <module>
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
src/transformers/testing_utils.py:54: in <module>
from .utils import (
E ImportError: cannot import name 'GGUF_MIN_VERSION' from 'transformers.utils' (/workspace/test_repo/src/transformers/utils/__init__.py)
| {'total': 17, 'passed': 6, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 3, 'warning': 8, 'error': 0} | {'total': 0, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | ./test_repo/src/transformers/testing_utils.py | ./test_repo/examples/tensorflow/test_tensorflow_examples.py |
2_transformers_callee__testing_utils__require_timm__d3eacbb8299161d21e007e7e3d42505dae741282 | callee | fail-to-pass | https://github.com/huggingface/transformers | d3eacbb8299161d21e007e7e3d42505dae741282 | function | require_timm | testing_utils.py | def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
if not is_timm_available():
return unittest.skip("test requires Timm")(test_case)
else:
return test_case
| def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
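# Hedged note and usage sketch (the class and test names are assumptions): unittest.skipUnless(condition, reason)
# returns a no-op decorator when the condition is truthy, so the one-liner above behaves like the original
# if/else variant while reporting the same skip reason.
#
#     @require_timm
#     class TimmIntegrationTest(unittest.TestCase):
#         def test_forward(self):
#             ...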
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import inspect
import logging
import os
import re
import shutil
import sys
import tempfile
import unittest
from distutils.util import strtobool
from io import StringIO
from pathlib import Path
from typing import Iterator, Union
from transformers import logging as transformers_logging
from .deepspeed import is_deepspeed_available
from .file_utils import (
is_datasets_available,
is_faiss_available,
is_flax_available,
is_onnx_available,
is_pandas_available,
is_scatter_available,
is_sentencepiece_available,
is_soundfile_availble,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_tpu_available,
is_torchaudio_available,
is_vision_available,
)
from .integrations import is_optuna_available, is_ray_available
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
PASS = "__DUMMY_TRANSFORMERS_PASS__"
ENDPOINT_STAGING = "https://moon-staging.huggingface.co"
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
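# Hedged examples (the environment values are assumptions): strtobool accepts yes/no/1/0/true/false,
# so flags parse to truthy/falsy ints, while unset integer variables fall back to the default.
#
#     os.environ["RUN_SLOW"] = "yes"
#     parse_flag_from_env("RUN_SLOW", default=False)            # -> 1 (truthy)
#     parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)   # -> None when unset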
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=False)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False)
_run_git_lfs_tests = parse_flag_from_env("RUN_GIT_LFS_TESTS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that control interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that control interactions between PyTorch and Flax
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip("test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test.
Pipeline tests are skipped by default and we can run only them by setting RUN_PIPELINE_TESTS environment variable
to a truthy value and selecting the is_pipeline_test pytest mark.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip("test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
if not _run_slow_tests:
return unittest.skip("test is slow")(test_case)
else:
return test_case
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
if not _run_custom_tokenizers:
return unittest.skip("test of custom tokenizers")(test_case)
else:
return test_case
def require_git_lfs(test_case):
"""
Decorator marking a test that requires git-lfs.
git-lfs requires additional dependencies, and tests are skipped by default. Set the RUN_GIT_LFS_TESTS environment
variable to a truthy value to run them.
"""
if not _run_git_lfs_tests:
return unittest.skip("test of git lfs workflow")(test_case)
else:
return test_case
def require_onnx(test_case):
if not is_onnx_available():
return unittest.skip("test requires ONNX")(test_case)
else:
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
else:
return test_case
def require_torch_scatter(test_case):
"""
Decorator marking a test that requires PyTorch scatter.
These tests are skipped when PyTorch scatter isn't installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch scatter")(test_case)
else:
return test_case
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
if not is_torchaudio_available():
return unittest.skip("test requires torchaudio")(test_case)
else:
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
if not is_tf_available():
return unittest.skip("test requires TensorFlow")(test_case)
else:
return test_case
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
if not is_flax_available():
test_case = unittest.skip("test requires JAX & Flax")(test_case)
return test_case
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
if not is_sentencepiece_available():
return unittest.skip("test requires SentencePiece")(test_case)
else:
return test_case
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
if not is_tokenizers_available():
return unittest.skip("test requires tokenizers")(test_case)
else:
return test_case
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
if not is_pandas_available():
return unittest.skip("test requires pandas")(test_case)
else:
return test_case
def require_scatter(test_case):
"""
Decorator marking a test that requires PyTorch Scatter. These tests are skipped when PyTorch Scatter isn't
installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch Scatter")(test_case)
else:
return test_case
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
"""
if not is_vision_available():
return unittest.skip("test requires vision")(test_case)
else:
return test_case
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
else:
return test_case
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() > 1:
return unittest.skip("test requires 0 or 1 GPU")(test_case)
else:
return test_case
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
if not is_torch_tpu_available():
return unittest.skip("test requires PyTorch TPU")
else:
return test_case
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
if torch_device != "cuda":
return unittest.skip("test requires CUDA")(test_case)
else:
return test_case
def require_datasets(test_case):
"""Decorator marking a test that requires datasets."""
if not is_datasets_available():
return unittest.skip("test requires `datasets`")(test_case)
else:
return test_case
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
if not is_faiss_available():
return unittest.skip("test requires `faiss`")(test_case)
else:
return test_case
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
if not is_optuna_available():
return unittest.skip("test requires optuna")(test_case)
else:
return test_case
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
if not is_ray_available():
return unittest.skip("test requires Ray/tune")(test_case)
else:
return test_case
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
if not is_soundfile_availble():
return unittest.skip("test requires soundfile")(test_case)
else:
return test_case
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
if not is_deepspeed_available():
return unittest.skip("test requires deepspeed")(test_case)
else:
return test_case
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch or tf is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
        The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout, clean it up and make it available via obj.out
- stderr, and make it available via obj.err
init arguments:
- out - capture stdout: True/False, default True
    - err - capture stderr: True/False, default True
Examples::
with CaptureStdout() as cs:
print("Secret message")
print(f"captured: {cs.out}")
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
print(f"captured: {cs.err}")
# to capture just one of the streams, but not the other
with CaptureStd(err=False) as cs:
print("Secret message")
print(f"captured: {cs.out}")
# but best use the stream-specific subclasses
"""
def __init__(self, out=True, err=True):
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
self.out = apply_print_resets(self.out_buf.getvalue())
if self.err_buf:
sys.stderr = self.err_old
self.err = self.err_buf.getvalue()
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self):
super().__init__(err=False)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self):
super().__init__(out=False)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
- logger: 'logging` logger object
Results:
The captured output is available via `self.out`
Example::
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
    >>> assert cl.out == msg + "\n"
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
For example ::
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("gpt2") # calls logger.info() several times
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
    Temporarily add a given path to `sys.path`.
Usage ::
with ExtendSysPath('/path/to/dir'):
mymodule = importlib.import_module('mymodule')
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends `unittest.TestCase` with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
* ``pathlib`` objects (all fully resolved):
- ``test_file_path`` - the current test file path (=``__file__``)
- ``test_file_dir`` - the directory containing the current test file
- ``tests_dir`` - the directory of the ``tests`` test suite
- ``examples_dir`` - the directory of the ``examples`` test suite
- ``repo_root_dir`` - the directory of the repository
- ``src_dir`` - the directory of ``src`` (i.e. where the ``transformers`` sub-dir resides)
* stringified paths---same as above but these return paths as strings, rather than ``pathlib`` objects:
- ``test_file_path_str``
- ``test_file_dir_str``
- ``tests_dir_str``
- ``examples_dir_str``
- ``repo_root_dir_str``
- ``src_dir_str``
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
::
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
``tmp_dir`` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
::
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the ``before`` and ``after`` args, leading to the
following behavior:
``before=True``: the temporary dir will always be cleared at the beginning of the test.
``before=False``: if the temporary dir already existed, any existing files will remain there.
``after=True``: the temporary dir will always be deleted at the end of the test.
``after=False``: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of ``rm -r`` safely, only subdirs of the project repository checkout are
allowed if an explicit ``tmp_dir`` is used, so that by mistake no ``/tmp`` or similar important part of the
filesystem will get nuked. i.e. please always pass paths that start with ``./``
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` specific to the current test suite.
This is useful for invoking external programs from the test suite - e.g. distributed training.
::
def test_whatever(self):
env = self.get_env()
"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` correctly, depending on the test suite
it's invoked from. This is useful for invoking external programs from the test suite - e.g. distributed
training.
It always inserts ``./src`` first, then ``./tests`` or ``./examples`` depending on the test suite type and
finally the preset ``PYTHONPATH`` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (:obj:`string`, `optional`):
if :obj:`None`:
- a unique temporary path will be created
- sets ``before=True`` if ``before`` is :obj:`None`
- sets ``after=True`` if ``after`` is :obj:`None`
else:
- :obj:`tmp_dir` will be created
- sets ``before=True`` if ``before`` is :obj:`None`
- sets ``after=False`` if ``after`` is :obj:`None`
before (:obj:`bool`, `optional`):
                If :obj:`True` and the :obj:`tmp_dir` already exists, make sure to empty it right away; if :obj:`False`
                and the :obj:`tmp_dir` already exists, any existing files will remain there.
after (:obj:`bool`, `optional`):
                If :obj:`True`, delete the :obj:`tmp_dir` at the end of the test; if :obj:`False`, leave the
                :obj:`tmp_dir` and its contents intact at the end of the test.
Returns:
tmp_dir(:obj:`string`): either the same value as passed via `tmp_dir` or the path to the auto-selected tmp
dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
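# Hedged sketch (the command and class name are assumptions): get_env() is typically combined with the
# subprocess helpers defined later in this file to launch example/distributed scripts with a correct PYTHONPATH.
#
#     class ExampleRunTest(TestCasePlus):
#         def test_run_example(self):
#             env = self.get_env()
#             cmd = [sys.executable, "-c", "import transformers; print(transformers.__version__)"]
#             execute_subprocess_async(cmd, env=env)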
def mockenv(**kwargs):
"""
this is a convenience wrapper, that allows this ::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
run_slow = os.getenv("RUN_SLOW", False)
use_tf = os.getenv("USE_TF", False)
"""
return unittest.mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the ``os.environ`` dictionary in-place. Similar to mockenv
The ``os.environ`` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
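# Hedged usage sketch (the variable names are assumptions): remove USE_TF and pin the verbosity for the
# duration of the block; both changes are rolled back on exit, even if the body raises.
#
#     with mockenv_context("USE_TF", TRANSFORMERS_VERBOSITY="error"):
#         ...  # code under test sees the patched environment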
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
    NB: this function taps into a private _pytest API and, while unlikely, it could break should
pytest do internal changes - also it calls default internal methods of terminalreporter which
can be hijacked by various `pytest-` plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = "reports"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{id}_{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
with open(report_files["passes"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
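# Hedged usage sketch (the child command is an assumption): run a small subprocess and inspect the
# captured stdout lines collected by the tee callbacks above.
#
#     result = execute_subprocess_async([sys.executable, "-c", "print('ok')"], env=os.environ.copy())
#     assert "ok" in "\n".join(result.stdout)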
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
from transformers.tokenization_utils import BatchEncoding
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, (dict, BatchEncoding)):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, np.float32):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import inspect
import logging
import os
import re
import shutil
import sys
import tempfile
import unittest
from distutils.util import strtobool
from io import StringIO
from pathlib import Path
from typing import Iterator, Union
from transformers import logging as transformers_logging
from .deepspeed import is_deepspeed_available
from .file_utils import (
is_datasets_available,
is_faiss_available,
is_flax_available,
is_onnx_available,
is_pandas_available,
is_scatter_available,
is_sentencepiece_available,
is_soundfile_availble,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_tpu_available,
is_torchaudio_available,
is_vision_available,
)
from .integrations import is_optuna_available, is_ray_available
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
PASS = "__DUMMY_TRANSFORMERS_PASS__"
ENDPOINT_STAGING = "https://moon-staging.huggingface.co"
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=False)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False)
_run_git_lfs_tests = parse_flag_from_env("RUN_GIT_LFS_TESTS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that control interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that control interactions between PyTorch and Flax
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip("test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test.
Pipeline tests are skipped by default and we can run only them by setting RUN_PIPELINE_TESTS environment variable
to a truthy value and selecting the is_pipeline_test pytest mark.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip("test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
if not _run_slow_tests:
return unittest.skip("test is slow")(test_case)
else:
return test_case
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
if not _run_custom_tokenizers:
return unittest.skip("test of custom tokenizers")(test_case)
else:
return test_case
def require_git_lfs(test_case):
"""
Decorator marking a test that requires git-lfs.
git-lfs requires additional dependencies, and tests are skipped by default. Set the RUN_GIT_LFS_TESTS environment
variable to a truthy value to run them.
"""
if not _run_git_lfs_tests:
return unittest.skip("test of git lfs workflow")(test_case)
else:
return test_case
def require_onnx(test_case):
if not is_onnx_available():
return unittest.skip("test requires ONNX")(test_case)
else:
return test_case
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
if not is_timm_available():
return unittest.skip("test requires Timm")(test_case)
else:
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
else:
return test_case
def require_torch_scatter(test_case):
"""
Decorator marking a test that requires PyTorch scatter.
These tests are skipped when PyTorch scatter isn't installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch scatter")(test_case)
else:
return test_case
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
if not is_torchaudio_available():
return unittest.skip("test requires torchaudio")(test_case)
else:
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
if not is_tf_available():
return unittest.skip("test requires TensorFlow")(test_case)
else:
return test_case
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
if not is_flax_available():
test_case = unittest.skip("test requires JAX & Flax")(test_case)
return test_case
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
if not is_sentencepiece_available():
return unittest.skip("test requires SentencePiece")(test_case)
else:
return test_case
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
if not is_tokenizers_available():
return unittest.skip("test requires tokenizers")(test_case)
else:
return test_case
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
if not is_pandas_available():
return unittest.skip("test requires pandas")(test_case)
else:
return test_case
def require_scatter(test_case):
"""
Decorator marking a test that requires PyTorch Scatter. These tests are skipped when PyTorch Scatter isn't
installed.
"""
if not is_scatter_available():
return unittest.skip("test requires PyTorch Scatter")(test_case)
else:
return test_case
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
"""
if not is_vision_available():
return unittest.skip("test requires vision")(test_case)
else:
return test_case
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
else:
return test_case
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() > 1:
return unittest.skip("test requires 0 or 1 GPU")(test_case)
else:
return test_case
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
if not is_torch_tpu_available():
return unittest.skip("test requires PyTorch TPU")
else:
return test_case
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
if torch_device != "cuda":
return unittest.skip("test requires CUDA")(test_case)
else:
return test_case
def require_datasets(test_case):
"""Decorator marking a test that requires datasets."""
if not is_datasets_available():
return unittest.skip("test requires `datasets`")(test_case)
else:
return test_case
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
if not is_faiss_available():
return unittest.skip("test requires `faiss`")(test_case)
else:
return test_case
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
if not is_optuna_available():
return unittest.skip("test requires optuna")(test_case)
else:
return test_case
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
if not is_ray_available():
return unittest.skip("test requires Ray/tune")(test_case)
else:
return test_case
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
if not is_soundfile_availble():
return unittest.skip("test requires soundfile")(test_case)
else:
return test_case
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
if not is_deepspeed_available():
return unittest.skip("test requires deepspeed")(test_case)
else:
return test_case
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch or tf is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
        The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout, clean it up and make it available via obj.out
- stderr, and make it available via obj.err
init arguments:
- out - capture stdout: True/False, default True
    - err - capture stderr: True/False, default True
Examples::
with CaptureStdout() as cs:
print("Secret message")
print(f"captured: {cs.out}")
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
print(f"captured: {cs.err}")
# to capture just one of the streams, but not the other
with CaptureStd(err=False) as cs:
print("Secret message")
print(f"captured: {cs.out}")
# but best use the stream-specific subclasses
"""
def __init__(self, out=True, err=True):
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
self.out = apply_print_resets(self.out_buf.getvalue())
if self.err_buf:
sys.stderr = self.err_old
self.err = self.err_buf.getvalue()
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self):
super().__init__(err=False)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self):
super().__init__(out=False)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
- logger: `logging` logger object
Results:
The captured output is available via `self.out`
Example::
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg+"\n"
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change the transformers module's logging level to the desired value and have it
restored to the original setting at the end of the scope.
For example ::
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("gpt2") # calls logger.info() several times
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
# adapted from https://stackoverflow.com/a/64789046/9201239
@contextlib.contextmanager
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporarily add the given path to `sys.path`.
Usage ::
with ExtendSysPath('/path/to/dir'):
mymodule = importlib.import_module('mymodule')
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends `unittest.TestCase` with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
* ``pathlib`` objects (all fully resolved):
- ``test_file_path`` - the current test file path (=``__file__``)
- ``test_file_dir`` - the directory containing the current test file
- ``tests_dir`` - the directory of the ``tests`` test suite
- ``examples_dir`` - the directory of the ``examples`` test suite
- ``repo_root_dir`` - the directory of the repository
- ``src_dir`` - the directory of ``src`` (i.e. where the ``transformers`` sub-dir resides)
* stringified paths---same as above but these return paths as strings, rather than ``pathlib`` objects:
- ``test_file_path_str``
- ``test_file_dir_str``
- ``tests_dir_str``
- ``examples_dir_str``
- ``repo_root_dir_str``
- ``src_dir_str``
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
::
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
``tmp_dir`` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
::
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the ``before`` and ``after`` args, leading to the
following behavior:
``before=True``: the temporary dir will always be cleared at the beginning of the test.
``before=False``: if the temporary dir already existed, any existing files will remain there.
``after=True``: the temporary dir will always be deleted at the end of the test.
``after=False``: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of ``rm -r`` safely, only subdirs of the project repository checkout are
allowed if an explicit ``tmp_dir`` is used, so that by mistake no ``/tmp`` or similar important part of the
filesystem will get nuked. i.e. please always pass paths that start with ``./``
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` specific to the current test suite.
This is useful for invoking external programs from the test suite - e.g. distributed training.
::
def test_whatever(self):
env = self.get_env()
"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` correctly, depending on the test suite
it's invoked from. This is useful for invoking external programs from the test suite - e.g. distributed
training.
It always inserts ``./src`` first, then ``./tests`` or ``./examples`` depending on the test suite type and
finally the preset ``PYTHONPATH`` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (:obj:`string`, `optional`):
if :obj:`None`:
- a unique temporary path will be created
- sets ``before=True`` if ``before`` is :obj:`None`
- sets ``after=True`` if ``after`` is :obj:`None`
else:
- :obj:`tmp_dir` will be created
- sets ``before=True`` if ``before`` is :obj:`None`
- sets ``after=False`` if ``after`` is :obj:`None`
before (:obj:`bool`, `optional`):
If :obj:`True` and the :obj:`tmp_dir` already exists, make sure to empty it right away; if :obj:`False`
and the :obj:`tmp_dir` already exists, any existing files will remain there.
after (:obj:`bool`, `optional`):
If :obj:`True`, delete the :obj:`tmp_dir` at the end of the test; if :obj:`False`, leave the
:obj:`tmp_dir` and its contents intact at the end of the test.
Returns:
tmp_dir(:obj:`string`): either the same value as passed via `tmp_dir` or the path to the auto-selected tmp
dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
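# A minimal usage sketch (the example class below is illustrative only and not part of
# the original module): it combines the auto-removed tmp dir and the PYTHONPATH-aware
# environment described in the `TestCasePlus` docstring above.
class _ExampleTestCasePlusUsage(TestCasePlus):
    def _example(self):
        # unique tmp dir, emptied before and deleted after the test
        tmp_dir = self.get_auto_remove_tmp_dir()
        # debug-friendly variant: fixed relative location, cleared before, kept after
        debug_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        # environment with ./src (and ./tests or ./examples) prepended to PYTHONPATH,
        # suitable for launching external programs such as distributed training
        env = self.get_env()
        return tmp_dir, debug_dir, env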
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows this ::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
run_slow = os.getenv("RUN_SLOW", False)
use_tf = os.getenv("USE_TF", False)
"""
return unittest.mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the ``os.environ`` dictionary in-place. Similar to mockenv
The ``os.environ`` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
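# A hedged usage sketch (illustrative helper, not part of the original module; the
# environment variable values below are arbitrary): unlike the decorator form above,
# `mockenv_context` can wrap just a portion of a test body.
def _example_mockenv_context():
    with mockenv_context("TRANSFORMERS_VERBOSITY", RUN_SLOW="1"):
        # inside the block RUN_SLOW is set and TRANSFORMERS_VERBOSITY is removed
        assert os.environ["RUN_SLOW"] == "1"
    # on exit the original os.environ contents are restored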
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
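# A minimal sketch of the wrapper the docstring above refers to (illustrative; the
# import path assumes this module is importable as `transformers.testing_utils`). It
# would live in `tests/conftest.py` (and similarly in `examples/conftest.py`):
#
#     from transformers.testing_utils import pytest_addoption_shared
#
#     def pytest_addoption(parser):
#         pytest_addoption_shared(parser)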
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and, while unlikely, it could break should
pytest change its internals - it also calls default internal methods of terminalreporter which
can be hijacked by various `pytest-` plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = "reports"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{id}_{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
with open(report_files["passes"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
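# A matching sketch for the report generation above (illustrative; the hook signature
# is standard pytest and `--make-reports` is the option registered by
# `pytest_addoption_shared`). In `tests/conftest.py` one could write:
#
#     from transformers.testing_utils import pytest_terminal_summary_main
#
#     def pytest_terminal_summary(terminalreporter):
#         make_reports = terminalreporter.config.getoption("--make-reports")
#         if make_reports:
#             pytest_terminal_summary_main(terminalreporter, id=make_reports)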
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
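# A hedged usage sketch (illustrative helper, not part of the original module; the
# launched command is a trivial placeholder): `execute_subprocess_async` is typically
# fed the PYTHONPATH-aware environment from `TestCasePlus.get_env()` so the child
# process imports the in-repo sources.
def _example_execute_subprocess_async(env=None):
    cmd = [sys.executable, "-c", "print('hello from a worker')"]
    result = execute_subprocess_async(cmd, env=env, echo=False)
    # stdout/stderr are returned as lists of already-decoded lines
    assert "hello from a worker" in result.stdout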
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
from transformers.tokenization_utils import BatchEncoding
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, (dict, BatchEncoding)):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, np.float32):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
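# A small usage sketch (illustrative helper, not part of the original module): the
# values below are arbitrary and only demonstrate the rounding/pass-through behavior.
def _example_nested_simplify():
    data = {"scores": [0.123456, 0.654321], "label": "positive"}
    # floats are rounded to `decimals`, strings/ints pass through unchanged
    assert nested_simplify(data, decimals=2) == {"scores": [0.12, 0.65], "label": "positive"}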
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be an int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
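# A hedged note with a sketch (the version string below is an arbitrary example, not a
# requirement of this module): because `require_accelerate` takes the test as its first
# positional argument, a custom `min_version` is applied via `functools.partial` rather
# than by calling the decorator directly, e.g.
#
#     @functools.partial(require_accelerate, min_version="0.28.0")
#     def test_needs_recent_accelerate(self):
#         ...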
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or its version does not match the current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one or both are not installed.
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when they aren't
installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(test_case)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
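# A hedged illustration of how the device resolution above is typically driven (the
# shell lines and test paths below are examples only):
#
#     # force CPU even when CUDA is present
#     TRANSFORMERS_TEST_DEVICE="cpu" pytest tests
#
#     # load a third-party backend module first, then select its device
#     TRANSFORMERS_TEST_BACKEND="torch_npu" TRANSFORMERS_TEST_DEVICE="npu" pytest tests
#
# Both environment variables are the ones read in the block above; when unset, the
# device falls back to "cuda" if available (or, with RUN_THIRD_PARTY_DEVICE_TESTS, to
# an available NPU/XPU) and to "cpu" otherwise.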
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(
torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(
torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
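# A small usage sketch (illustrative; the appended fixture path is a placeholder).
# Called from a file inside the test suite, e.g. tests/models/foo/test_modeling_foo.py:
#
#     tests_dir = get_tests_dir()                      # .../transformers/tests
#     fixture = get_tests_dir("fixtures/dummy.txt")    # .../transformers/tests/fixtures/dummy.txt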
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
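# Illustrative sketch (assumption): what `apply_print_resets` does to a buffer containing
# carriage-return based progress updates. Only the text after the last `\r` on each line
# survives, and that cleaned text is what `assert_screenout` then searches in.
def _example_apply_print_resets():
    assert apply_print_resets("foo bar\r tar mar\rfinal message") == "final message"
    assert_screenout("foo bar\r tar mar\rfinal message", "final")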
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out == msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change the transformers modules' logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporarily adds the given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
Often in tests we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows this::

    @mockenv(RUN_SLOW=True, USE_TF=False)
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
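# Illustrative sketch (assumption): using `mockenv_context` to force a setting for the
# duration of a block, with the previous environment restored on exit. The variable names
# are examples only.
def _example_mockenv_context_usage():
    with mockenv_context("SOME_VAR_TO_UNSET", TRANSFORMERS_OFFLINE="1"):
        assert os.environ["TRANSFORMERS_OFFLINE"] == "1"
        assert "SOME_VAR_TO_UNSET" not in os.environ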
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
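# Illustrative sketch (assumption, mirroring the docstrings above): how a repo-level
# `conftest.py` typically wires the two shared helpers. The inner functions are defined
# here purely for illustration; in practice they live in `conftest.py`, where pytest picks
# them up as hooks.
def _example_conftest_wiring():
    def pytest_addoption(parser):
        pytest_addoption_shared(parser)

    def pytest_terminal_summary(terminalreporter):
        make_reports = terminalreporter.config.getoption("--make-reports")
        if make_reports:
            pytest_terminal_summary_main(terminalreporter, id=make_reports)

    return pytest_addoption, pytest_terminal_summary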
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
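# Illustrative sketch (assumption): launching a small distributed job from a `TestCasePlus`
# test with `execute_subprocess_async`, using `get_torch_dist_unique_port` so concurrent
# pytest-xdist workers don't collide on the master port. The script path and arguments are
# hypothetical placeholders.
def _example_distributed_launch(test_case):
    master_port = get_torch_dist_unique_port()
    cmd = [
        "torchrun",
        "--nproc_per_node=2",
        f"--master_port={master_port}",
        "examples/some_training_script.py",
        "--output_dir",
        test_case.get_auto_remove_tmp_dir(),
    ]
    # `get_env()` puts the repo's `src` (and `tests`/`examples`) dirs on PYTHONPATH for the child
    return execute_subprocess_async(cmd, env=test_case.get_env())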
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
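# Illustrative sketch (assumption): running a CLI and asserting on its captured stdout; on a
# non-zero exit code the helper raises `SubprocessCallException` with the captured output.
def _example_run_command_usage():
    stdout = run_command([sys.executable, "-c", "print('ok')"], return_stdout=True)
    assert "ok" in stdout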
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
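# Illustrative sketch (assumption): marking a hypothetical test as flaky so it is retried a
# few times before being reported as a failure.
def _example_is_flaky_usage():
    @is_flaky(max_attempts=3, wait_before_retry=0.1, description="illustrative only")
    def sometimes_fails():
        return True

    return sometimes_fails()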
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_methohd = "spawn"
ctx = multiprocessing.get_context(start_methohd)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA-related code is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
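# Illustrative sketch (assumption): `preprocess_string` appends `# doctest: +IGNORE_RESULT`
# to `load_dataset(...)` lines inside ```python code blocks so their printed output does not
# fail the doctest run. The snippet below is a made-up markdown fragment.
def _example_preprocess_string():
    snippet = '```python\n>>> from datasets import load_dataset\n>>> ds = load_dataset("dummy")\n```'
    assert "# doctest: +IGNORE_RESULT" in preprocess_string(snippet, skip_cuda_tests=False)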
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# here instead of at the user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
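# Illustrative sketch (assumption): a device-agnostic test body that seeds, clears the cache
# and checks the device count without branching on the backend. It assumes the dispatch
# tables above have a usable entry (or default) for the requested device, e.g. with torch
# installed.
def _example_backend_helpers(device="cpu"):
    backend_manual_seed(device, 0)
    backend_empty_cache(device)
    return backend_device_count(device)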
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env("RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
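# Illustrative sketch (assumption): `require_read_token` is typically stacked with other
# markers on tests that download gated checkpoints; the repo id below is a hypothetical
# placeholder and is never actually fetched here.
def _example_require_read_token_usage():
    @require_read_token
    def load_gated_checkpoint():
        # a real test would call e.g. AutoModel.from_pretrained("<some-gated-repo-id>") here
        return "loaded"

    return load_gated_checkpoint()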
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
test_case
)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires Cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
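# --- Hedged illustration (not part of the original file) -----------------------------
# A minimal sketch of how the `require_*` decorators above are typically stacked on a
# test function: each one wraps the test in `unittest.skipUnless`, so the test only runs
# when every listed dependency is available. The test body below is a hypothetical placeholder.
@require_tf
@require_sentencepiece
@require_tokenizers
def _example_stacked_requirements_test():
    # Runs only when TensorFlow, SentencePiece and 🤗 Tokenizers are all installed.
    pass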
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
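# --- Hedged illustration (not part of the original file) -----------------------------
# Sketch of how `get_tests_dir` is typically called from inside a test module to locate
# a fixture; "fixtures/dummy.txt" is a hypothetical path used only for this example.
def _example_fixture_path():
    return get_tests_dir("fixtures/dummy.txt")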
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
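# --- Hedged illustration (not part of the original file) -----------------------------
# Sketch of what `apply_print_resets` does with progress-bar style output: everything up
# to the last carriage return on a line is dropped, leaving only the final message.
def _example_apply_print_resets():
    buf = "step 1/3\rstep 2/3\rfinal message"
    assert apply_print_resets(buf) == "final message"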
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this is a not wanted behavior and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError("/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
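# --- Hedged illustration (not part of the original file) -----------------------------
# A minimal sketch of a test built on `TestCasePlus`, exercising the auto-removed temp
# dir and the PYTHONPATH-adjusted environment described in the class docstring above.
# The test name and the file it writes are hypothetical.
class _ExampleTestCasePlusUsage(TestCasePlus):
    def test_tmp_dir_and_env(self):
        tmp_dir = self.get_auto_remove_tmp_dir()  # removed automatically in tearDown()
        with open(os.path.join(tmp_dir, "dummy.txt"), "w") as f:
            f.write("hello")
        env = self.get_env()  # copy of os.environ with src/ and tests/ (or examples/) on PYTHONPATH
        self.assertIn("PYTHONPATH", env)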
def mockenv(**kwargs):
"""
This is a convenience wrapper that allows this::
    @mockenv(RUN_SLOW=True, USE_TF=False)
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
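# --- Hedged illustration (not part of the original file) -----------------------------
# Sketch of `mockenv_context` usage: keyword arguments are set for the duration of the
# `with` block and the previous environment is restored on exit. The variable name is
# hypothetical and assumed not to be set beforehand.
def _example_mockenv_context():
    with mockenv_context(MY_HYPOTHETICAL_FLAG="1"):
        assert os.environ["MY_HYPOTHETICAL_FLAG"] == "1"
    assert "MY_HYPOTHETICAL_FLAG" not in os.environ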
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and, while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it times out on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, we will need to switch to the following code. The problem is that no data
# will be seen until it's done, and if it hangs, for example, there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
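# --- Hedged illustration (not part of the original file) -----------------------------
# Sketch of how the unique port is typically combined with `execute_subprocess_async` to
# launch a distributed run from a test; the script path is hypothetical and the command
# is only assembled here, not executed.
def _example_distributed_launch_cmd(n_gpus: int = 2):
    master_port = get_torch_dist_unique_port()
    return f"torchrun --nproc_per_node={n_gpus} --master_port={master_port} examples/dummy_training_script.py".split()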
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
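# --- Hedged illustration (not part of the original file) -----------------------------
# Sketch of what `nested_simplify` produces: floats are rounded to `decimals` places and
# nested containers are handled recursively, which keeps equality assertions readable.
def _example_nested_simplify():
    assert nested_simplify({"scores": [1.23456, 2.0]}, decimals=3) == {"scores": [1.235, 2.0]}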
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
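# --- Hedged illustration (not part of the original file) -----------------------------
# Sketch of `is_flaky` applied to a test: the body is retried up to `max_attempts` times
# before the final attempt's outcome is reported. The flakiness source is hypothetical.
@is_flaky(max_attempts=3, wait_before_retry=0.1, description="illustrative flaky test")
def _example_flaky_test():
    pass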
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_method = "spawn"
ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
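# --- Hedged illustration (not part of the original file) -----------------------------
# Sketch of a `target_func` compatible with `run_test_in_subprocess`: it reads its inputs
# from the input queue, always reports an "error" key through the output queue (checked by
# the caller above), and waits for the parent's `task_done()`. The computation is hypothetical.
def _example_subprocess_target(in_queue, out_queue, timeout):
    error = None
    results = {}
    try:
        inputs = in_queue.get(timeout=timeout)
        results["sum"] = sum(inputs["values"]) if inputs else 0
    except Exception as e:
        error = str(e)
    out_queue.put({**results, "error": error}, timeout=timeout)
    out_queue.join()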
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstring. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and a
CUDA-related snippet is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# instead at user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
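# --- Hedged illustration (not part of the original file) -----------------------------
# Sketch of how the device-agnostic helpers above are typically used in a test body
# instead of calling `torch.cuda.*` directly; it assumes the test already requires a
# PyTorch backend so that `torch_device` is a valid device string.
def _example_device_agnostic_setup(seed: int = 42):
    backend_manual_seed(torch_device, seed)
    backend_empty_cache(torch_device)
    return backend_device_count(torch_device)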
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError("Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
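# --- Hedged illustration (not part of the original file) -----------------------------
# Sketch of the kind of module `TRANSFORMERS_TEST_DEVICE_SPEC` is expected to point at,
# following the attribute names consumed above (`DEVICE_NAME`, `MANUAL_SEED_FN`,
# `EMPTY_CACHE_FN`, `DEVICE_COUNT_FN`). The backend and file name are hypothetical; the
# helper below only writes such a spec file for illustration.
def _example_write_device_spec(path="my_device_spec.py"):
    spec = (
        "import torch\n"
        "DEVICE_NAME = 'cuda'\n"
        "MANUAL_SEED_FN = torch.cuda.manual_seed\n"
        "EMPTY_CACHE_FN = torch.cuda.empty_cache\n"
        "DEVICE_COUNT_FN = torch.cuda.device_count\n"
    )
    with open(path, "w") as f:
        f.write(spec)
    return path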
| 2024-09-07 07:14:45.645655: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-09-07 07:14:45.659543: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
2024-09-07 07:14:45.675567: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
2024-09-07 07:14:45.680367: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-09-07 07:14:45.693262: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-09-07 07:14:46.693445: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
ImportError while loading conftest '/workspace/test_repo/conftest.py'.
conftest.py:26: in <module>
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
src/transformers/testing_utils.py:54: in <module>
from .utils import (
E ImportError: cannot import name 'GGUF_MIN_VERSION' from 'transformers.utils' (/workspace/test_repo/src/transformers/utils/__init__.py)
| {'total': 17, 'passed': 6, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 3, 'warning': 8, 'error': 0} | {'total': 0, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | ./test_repo/src/transformers/testing_utils.py | ./test_repo/examples/tensorflow/test_tensorflow_examples.py |
2_transformers_callee__testing_utils__require_torch__13deb95a405bbd1037ad233c692d7fd1de9d31e3 | callee | fail-to-pass | https://github.com/huggingface/transformers | 13deb95a405bbd1037ad233c692d7fd1de9d31e3 | function | require_torch | testing_utils.py | def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not _torch_available:
test_case = unittest.skip("test requires PyTorch")(test_case)
return test_case
| def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
| import os
import unittest
from distutils.util import strtobool
from transformers.file_utils import _tf_available, _torch_available
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError("If set, {} must be yes or no.".format(key))
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError("If set, {} must be a int.".format(key))
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable
to a truthy value to run them.
"""
if not _run_slow_tests:
test_case = unittest.skip("test is slow")(test_case)
return test_case
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped
by default. Set the RUN_CUSTOM_TOKENIZERS environment variable
to a truthy value to run them.
"""
if not _run_custom_tokenizers:
test_case = unittest.skip("test of custom tokenizers")(test_case)
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow.
These tests are skipped when TensorFlow isn't installed.
"""
if not _tf_available:
test_case = unittest.skip("test requires TensorFlow")(test_case)
return test_case
def require_multigpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch).
These tests are skipped on a machine without multiple GPUs.
To run *only* the multigpu tests, assuming all test names contain multigpu:
$ pytest -sv ./tests -k "multigpu"
"""
if not _torch_available:
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
return test_case
if _torch_available:
# Set the USE_CUDA environment variable to select a GPU.
torch_device = "cuda" if parse_flag_from_env("USE_CUDA") else "cpu"
else:
torch_device = None
| import os
import unittest
from distutils.util import strtobool
from transformers.file_utils import _tf_available, _torch_available
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError("If set, {} must be yes or no.".format(key))
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError("If set, {} must be a int.".format(key))
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable
to a truthy value to run them.
"""
if not _run_slow_tests:
test_case = unittest.skip("test is slow")(test_case)
return test_case
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped
by default. Set the RUN_CUSTOM_TOKENIZERS environment variable
to a truthy value to run them.
"""
if not _run_custom_tokenizers:
test_case = unittest.skip("test of custom tokenizers")(test_case)
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not _torch_available:
test_case = unittest.skip("test requires PyTorch")(test_case)
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow.
These tests are skipped when TensorFlow isn't installed.
"""
if not _tf_available:
test_case = unittest.skip("test requires TensorFlow")(test_case)
return test_case
def require_multigpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch).
These tests are skipped on a machine without multiple GPUs.
To run *only* the multigpu tests, assuming all test names contain multigpu:
$ pytest -sv ./tests -k "multigpu"
"""
if not _torch_available:
return unittest.skip("test requires PyTorch")(test_case)
import torch
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
return test_case
if _torch_available:
# Set the USE_CUDA environment variable to select a GPU.
torch_device = "cuda" if parse_flag_from_env("USE_CUDA") else "cpu"
else:
torch_device = None
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that control interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that control interactions between PyTorch and Flax
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch not TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision dependencies aren't
installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
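# --- Added illustrative sketch (hypothetical test, not part of the original file) ---
# The require_*/slow decorators are typically stacked on individual tests; each one
# turns the test into a skip when its precondition (env flag, library, hardware) is unmet.
class _ExampleMultiGpuTest(unittest.TestCase):
    @slow
    @require_torch_multi_gpu
    def test_forward_is_parallelizable(self):
        # Hypothetical body: only runs with RUN_SLOW=1 and more than one CUDA device.
        pass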
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(test_case)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(
torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(
torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
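# --- Added illustrative sketch (not part of the original file) ---
# Progress-bar style output collapses to its final state once the \r resets are applied:
_example_progress = "downloading... 10%\rdownloading... 100%\rdone"
assert apply_print_resets(_example_progress) == "done"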
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this is a not wanted behavior and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the `tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows writing ::

    @mockenv(RUN_SLOW=True, USE_TF=False)
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
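# --- Illustrative sketch (not part of the original module) ---
# A minimal example of decorating a test with `mockenv`; the test name and the
# exact environment values below are hypothetical.
@mockenv(RUN_SLOW="1", USE_TF="0")
def _example_mockenv_usage():
    assert os.environ["RUN_SLOW"] == "1"
    assert os.environ["USE_TF"] == "0"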
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
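# --- Illustrative sketch (not part of the original module) ---
# Temporarily adding one environment variable and removing another with
# `mockenv_context`; the variable names are hypothetical.
def _example_mockenv_context_usage():
    with mockenv_context("UNWANTED_VAR", WANTED_VAR="1"):
        assert os.environ.get("WANTED_VAR") == "1"
        assert "UNWANTED_VAR" not in os.environ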
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and, while unlikely, it could break should pytest do internal
changes - it also calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
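# --- Illustrative sketch (not part of the original module) ---
# Roughly what the conftest.py wrappers mentioned above are expected to look
# like; the names are underscore-prefixed here so they are not picked up as
# real pytest hooks in this module.
def _sketch_pytest_addoption(parser):
    pytest_addoption_shared(parser)
def _sketch_pytest_terminal_summary(terminalreporter):
    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)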
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
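# --- Illustrative sketch (not part of the original module) ---
# Building a distributed launch command with a per-worker unique port; the
# script name and argument values are hypothetical.
def _example_distributed_launch_cmd():
    master_port = get_torch_dist_unique_port()
    return shlex.split(
        f"python -m torch.distributed.run --nproc_per_node=2 --master_port={master_port} some_training_script.py"
    )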
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(line) - len(line.lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
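# --- Illustrative sketch (not part of the original module) ---
# Capturing stdout from a small subprocess via `run_command`.
def _example_run_command_usage():
    out = run_command([sys.executable, "-c", "print('hello')"], return_stdout=True)
    assert out.strip() == "hello"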
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
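# --- Illustrative sketch (not part of the original module) ---
# Marking a hypothetical test as flaky so it is retried up to 3 times, with a
# short pause between attempts.
@is_flaky(max_attempts=3, wait_before_retry=0.5, description="hypothetical intermittent network failure")
def _example_flaky_test():
    assert True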
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_method = "spawn"
ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
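# --- Illustrative sketch (not part of the original module) ---
# The general shape of a `target_func` passed to `run_test_in_subprocess`: read
# the inputs from the input queue, report through a dict with an "error" key on
# the output queue, then wait for the parent to consume it.
def _example_target_func(in_queue, out_queue, timeout):
    error = None
    try:
        inputs = in_queue.get(timeout=timeout)
        _ = inputs  # the actual assertions of the test would go here
    except Exception as e:
        error = str(e)
    out_queue.put({"error": error}, timeout=timeout)
    out_queue.join()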
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA-related code is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
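# --- Illustrative sketch (not part of the original module) ---
# `preprocess_string` tags `load_dataset(...)` lines inside ```python blocks
# with `# doctest: +IGNORE_RESULT`; the snippet below is hypothetical.
def _example_preprocess_string_usage():
    snippet = '```python\n>>> from datasets import load_dataset\n>>> ds = load_dataset("dummy")\n```'
    processed = preprocess_string(snippet, skip_cuda_tests=False)
    assert "# doctest: +IGNORE_RESULT" in processed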
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Guard against a `None` entry
# here instead of at the call site.
if fn is None:
return None
return fn(*args, **kwargs)
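# --- Illustrative sketch (not part of the original module) ---
# A hypothetical dispatch table with a per-device callable and a "default"
# fallback, resolved through `_device_agnostic_dispatch`.
_EXAMPLE_DISPATCH_TABLE = {"cuda": lambda: "cuda path", "default": lambda: "generic path"}
def _example_device_agnostic_dispatch():
    assert _device_agnostic_dispatch("cuda", _EXAMPLE_DISPATCH_TABLE) == "cuda path"
    assert _device_agnostic_dispatch("xpu", _EXAMPLE_DISPATCH_TABLE) == "generic path"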
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
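# --- Illustrative sketch (not part of the original module) ---
# How hypothetical flags would be read, mirroring the RUN_* switches below.
def _example_parse_env_helpers():
    with mock.patch.dict(os.environ, {"EXAMPLE_FLAG": "yes", "EXAMPLE_LIMIT": "8"}):
        assert parse_flag_from_env("EXAMPLE_FLAG", default=False)
        assert parse_int_from_env("EXAMPLE_LIMIT", default=None) == 8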
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env("RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
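# --- Illustrative sketch (not part of the original module) ---
# Typical use of a `require_*` decorator on a hypothetical test case.
class _ExampleRequireTorchTest(unittest.TestCase):
    @require_torch
    def test_torch_is_available(self):
        self.assertTrue(is_torch_available())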
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or its version does not match the
current PyTorch version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when they aren't
installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
    test_case
)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
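# Illustrative sketch, not part of the original file: the `require_*` decorators above are meant to be
# stacked on individual tests or whole test classes. The class and test names below are hypothetical
# and only demonstrate the usage pattern.
class _ExampleDecoratedTest(unittest.TestCase):
    @require_torch_gpu
    @require_bitsandbytes
    def test_quantized_forward_pass(self):
        # Skipped automatically when CUDA or bitsandbytes is unavailable.
        self.assertTrue(True)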
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if it is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
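# Illustrative sketch, not part of the original file: `get_tests_dir` resolves paths relative to the
# repository's `tests` directory no matter where pytest was invoked from. The fixture file name below
# is hypothetical.
#
#     SAMPLE_VOCAB = get_tests_dir("fixtures/dummy_vocab.json")
#     with open(SAMPLE_VOCAB) as f:
#         vocab = f.read()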
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
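# Illustrative sketch, not part of the original file: what the two helpers above do with
# carriage-return-overwritten output (the strings are made up).
#
#     apply_print_resets("downloading  10%\rdownloading 100%\rdone\n")  # -> "done\n"
#     assert_screenout("downloading 100%\rAll done", "all done")        # passes (case-insensitive)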
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that no `/tmp` or similar important part of the filesystem
will get nuked by mistake, i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
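# Illustrative sketch, not part of the original file: overriding the defaults described above for
# debugging (the directory name is hypothetical). The dir is emptied before the test and left on disk
# afterwards for inspection.
#
#     def test_whatever(self):
#         tmp_dir = self.get_auto_remove_tmp_dir("./xxx/debug", before=True, after=False)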
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError("/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows this ::

@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
    run_slow = os.getenv("RUN_SLOW", False)
    use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
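# Illustrative sketch, not part of the original file: `mockenv_context` usage (the variable names are
# hypothetical). Positional arguments are removed, keyword arguments are set, and everything is
# restored on exit.
#
#     with mockenv_context("OLD_VAR", NEW_VAR="1"):
#         ...  # code here sees NEW_VAR=1 and no OLD_VAR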
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
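# Illustrative sketch, not part of the original file: how a `conftest.py` is typically expected to wire
# up the two helpers above via the standard pytest hooks (this mirrors the usage described in their
# docstrings; treat it as a sketch rather than a verbatim copy of any particular conftest).
#
#     # conftest.py
#     from transformers.testing_utils import pytest_addoption_shared, pytest_terminal_summary_main
#
#     def pytest_addoption(parser):
#         pytest_addoption_shared(parser)
#
#     def pytest_terminal_summary(terminalreporter):
#         make_reports = terminalreporter.config.getoption("--make-reports")
#         if make_reports:
#             pytest_terminal_summary_main(terminalreporter, id=make_reports)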
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
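# Illustrative sketch, not part of the original file: launching a distributed run from a `TestCasePlus`
# test with a collision-free port under pytest-xdist (the script path and argv are hypothetical).
#
#     def test_distributed(self):
#         port = get_torch_dist_unique_port()
#         cmd = f"torchrun --nproc_per_node=2 --master_port={port} ./hypothetical_train.py --max_steps 2".split()
#         execute_subprocess_async(cmd, env=self.get_env())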
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers and downcasting tensors/numpy arrays, to allow for simple equality
tests within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
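# Illustrative sketch, not part of the original file: `run_command` behavior on success and failure
# (the one-liners are trivial examples, not taken from any test).
#
#     run_command(["python", "-c", "print('ok')"], return_stdout=True)   # -> "ok\n"
#     run_command(["python", "-c", "raise SystemExit(1)"])               # raises SubprocessCallException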
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
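# Illustrative sketch, not part of the original file: marking a known-flaky test for retries
# (the test name and description are hypothetical).
#
#     @is_flaky(max_attempts=3, wait_before_retry=1.0, description="occasionally times out, see tracking issue")
#     def test_sometimes_flaky(self):
#         ...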
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_methohd = "spawn"
ctx = multiprocessing.get_context(start_methohd)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstring. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA usage is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Entries in the dispatch table may be `None`, in which case we
# return `None` here instead of making the caller guard against it at user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
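# Illustrative sketch, not part of the original file: using the wrappers above keeps a test
# device-agnostic instead of special-casing `torch.cuda` (the seed value is arbitrary).
#
#     backend_manual_seed(torch_device, 42)
#     ...  # run the model under test
#     backend_empty_cache(torch_device)
#     n_devices = backend_device_count(torch_device)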
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError("Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
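# Illustrative sketch, not part of the original file: the rough shape of a device spec module pointed
# to by `TRANSFORMERS_TEST_DEVICE_SPEC`. `DEVICE_NAME` and the `*_FN` attribute names are the ones
# consumed above; everything else (the device name, the lambdas) is hypothetical.
#
#     # hypothetical_device_spec.py
#     DEVICE_NAME = "mydevice"
#     MANUAL_SEED_FN = lambda seed: None   # backend-specific seeding would go here
#     EMPTY_CACHE_FN = None                # `None` entries simply become no-ops in the dispatch
#     DEVICE_COUNT_FN = lambda: 1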
| 2024-09-07 07:20:18.256394: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-09-07 07:20:18.270690: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
2024-09-07 07:20:18.286905: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
2024-09-07 07:20:18.291753: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-09-07 07:20:18.304512: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-09-07 07:20:19.442102: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
ImportError while loading conftest '/workspace/test_repo/conftest.py'.
conftest.py:26: in <module>
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
src/transformers/testing_utils.py:54: in <module>
from .utils import (
E ImportError: cannot import name 'GGUF_MIN_VERSION' from 'transformers.utils' (/workspace/test_repo/src/transformers/utils/__init__.py)
| {'total': 17, 'passed': 6, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 3, 'warning': 8, 'error': 0} | {'total': 0, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | ./test_repo/src/transformers/testing_utils.py | ./test_repo/examples/tensorflow/test_tensorflow_examples.py |
2_transformers_callee__testing_utils__require_flash_attn__368a58e61cda8418af7f80dce6249cbf19db5e23 | callee | fail-to-pass | https://github.com/huggingface/transformers | 368a58e61cda8418af7f80dce6249cbf19db5e23 | function | require_flash_attn | testing_utils.py | def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_available(), "test requires Flash Attention")(test_case)
| def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections.abc import Mapping
from io import StringIO
from pathlib import Path
from typing import Iterable, Iterator, List, Optional, Union
from unittest import mock
import huggingface_hub
import requests
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_fairscale_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
is_accelerate_available,
is_apex_available,
is_auto_gptq_available,
is_bitsandbytes_available,
is_bs4_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_essentia_available,
is_faiss_available,
is_flash_attn_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torch_xpu_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
import_path,
)
from _pytest.outcomes import skip
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_tool_tests = parse_flag_from_env("RUN_TOOL_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip("test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip("test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_tool_test(test_case):
"""
Decorator marking a test as a tool test. If RUN_TOOL_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_tool_tests:
return unittest.skip("test is a tool test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_tool_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_accelerate(test_case):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(is_accelerate_available(), "test requires accelerate")(test_case)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision dependencies aren't
installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
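# Illustrative sketch (hypothetical test, not part of the original utilities): a test that
# only makes sense with at least two CUDA devices, selectable via `pytest -k "multi_gpu"`.
class _ExampleMultiGpuUsage(unittest.TestCase):  # hypothetical name
    @require_torch_multi_gpu
    def test_forward_multi_gpu(self):
        import torch  # the decorator guarantees PyTorch is importable when this runs
        self.assertGreater(torch.cuda.device_count(), 1)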
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_tpu_available(check_device=False), "test requires PyTorch TPU")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip("test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU and IPEX.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires IPEX and an XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup with IPEX and at least one XPU device. These tests are
skipped on a machine without IPEX or multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip("test requires IPEX and atleast one XPU device")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
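# Illustrative sketch (not part of the original utilities): how tests typically consume the
# `torch_device` resolved above. `_example_tensor_on_test_device` is a hypothetical helper.
def _example_tensor_on_test_device():
    if not is_torch_available():
        return None
    import torch
    # Allocate on whatever device the test environment selected (cpu, cuda, npu or xpu).
    return torch.zeros(2, 2, device=torch_device)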
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_fairscale(test_case):
"""
Decorator marking a test that requires fairscale
"""
return unittest.skipUnless(is_fairscale_available(), "test requires fairscale")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator for bits and bytes (bnb) dependency
"""
return unittest.skipUnless(is_bitsandbytes_available(), "test requires bnb")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires Cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
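# Illustrative sketch (not part of the original utilities): a hypothetical test that would live
# in a file under `tests/`, where `get_tests_dir` resolves fixture paths regardless of the
# directory pytest was launched from. The fixture name below is made up.
# def test_load_dummy_fixture():
#     fixture = os.path.join(get_tests_dir(), "fixtures", "dummy.txt")
#     assert os.path.isfile(fixture)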
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporarily add the given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
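# Illustrative sketch (hypothetical test, not part of the original utilities): the two common
# `get_auto_remove_tmp_dir` modes described in the TestCasePlus docstring, assuming the suite
# runs from a repository checkout.
class _ExampleTmpDirUsage(TestCasePlus):  # hypothetical name
    def test_write_into_auto_removed_dir(self):
        tmp_dir = self.get_auto_remove_tmp_dir()  # unique dir, deleted in tearDown()
        Path(tmp_dir, "dummy.txt").write_text("ok")
    def test_write_into_debug_dir(self):
        # relative "./" path: cleared before the test, left intact afterwards for inspection
        tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
        Path(tmp_dir, "dummy.txt").write_text("ok")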
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows this::
    @mockenv(RUN_SLOW=True, USE_TF=False)
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
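# Illustrative sketch (not part of the original utilities): temporarily overriding and removing
# environment variables with `mockenv_context`. The variable names below are hypothetical.
def _example_mockenv_context():
    with mockenv_context("UNWANTED_VAR", WANTED_VAR="1"):
        assert os.environ.get("WANTED_VAR") == "1"
        assert "UNWANTED_VAR" not in os.environ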
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and, while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
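# Illustrative sketch (not part of the original utilities): building a distributed launch
# command whose --master_port stays unique per pytest-xdist worker. The script path is made up.
def _example_distributed_cmd():
    return shlex.split(
        "python -m torch.distributed.run --nproc_per_node=2 "
        f"--master_port={get_torch_dist_unique_port()} ./examples/dummy_script.py"
    )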
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key on its own line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(line) - len(line.lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
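# Illustrative check (not part of the original utilities): scalars are duplicated, iterables
# are passed through unchanged.
def _example_to_2tuple():
    assert to_2tuple(7) == (7, 7)
    assert to_2tuple((3, 5)) == (3, 5)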
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
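# Illustrative sketch (not part of the original utilities): running a short subprocess and
# capturing its stdout via `run_command`.
def _example_run_command():
    out = run_command([sys.executable, "-c", "print('hello')"], return_stdout=True)
    assert "hello" in out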
class RequestCounter:
"""
Helper class that will count all requests made online.
"""
def __enter__(self):
self.head_request_count = 0
self.get_request_count = 0
self.other_request_count = 0
# Mock `get_session` to count HTTP calls.
self.old_get_session = huggingface_hub.utils._http.get_session
self.session = requests.Session()
self.session.request = self.new_request
huggingface_hub.utils._http.get_session = lambda: self.session
return self
def __exit__(self, *args, **kwargs):
huggingface_hub.utils._http.get_session = self.old_get_session
def new_request(self, method, **kwargs):
if method == "GET":
self.get_request_count += 1
elif method == "HEAD":
self.head_request_count += 1
else:
self.other_request_count += 1
return requests.request(method=method, **kwargs)
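# Illustrative sketch (not part of the original utilities): counting HTTP calls made while the
# context is active. Whether a given hub call is routed through the patched session depends on
# the installed huggingface_hub version, so the call below is only indicated as a comment.
def _example_request_counter():
    with RequestCounter() as counter:
        # e.g. huggingface_hub.hf_hub_download(<some repo id>, "config.json")
        pass
    return counter.head_request_count, counter.get_request_count, counter.other_request_count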
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
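# Illustrative sketch (hypothetical test, not part of the original utilities): retrying an
# intermittently failing test with a short pause between attempts.
class _ExampleFlakyUsage(unittest.TestCase):  # hypothetical name
    @is_flaky(max_attempts=3, wait_before_retry=0.5, description="timing-dependent assertion")
    def test_sometimes_slow_response(self):
        self.assertTrue(True)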
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_methohd = "spawn"
ctx = multiprocessing.get_context(start_methohd)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
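# Illustrative sketch (not part of the original utilities): the target-function contract that
# `run_test_in_subprocess` assumes - read the inputs from the input queue and report back a
# dict with an "error" key on the output queue. Names below are hypothetical; the trailing
# `join()` waits for the parent's `task_done()` call.
def _example_subprocess_target(in_queue, out_queue, timeout):
    error = None
    try:
        inputs = in_queue.get(timeout=timeout)
        _ = inputs  # the actual testing logic would go here
    except Exception as e:
        error = f"{e}"
    out_queue.put({"error": error}, timeout=timeout)
    out_queue.join()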
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstring. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and a
CUDA usage is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections.abc import Mapping
from io import StringIO
from pathlib import Path
from typing import Iterable, Iterator, List, Optional, Union
from unittest import mock
import huggingface_hub
import requests
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_fairscale_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
is_accelerate_available,
is_apex_available,
is_auto_gptq_available,
is_bitsandbytes_available,
is_bs4_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_essentia_available,
is_faiss_available,
is_flash_attn_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torch_xpu_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
import_path,
)
from _pytest.outcomes import skip
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_tool_tests = parse_flag_from_env("RUN_TOOL_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and can be run exclusively by setting the RUN_PT_TF_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
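# Illustrative usage of the marker above (a minimal sketch, not part of the original
# module; the test name is hypothetical and the mark is assumed to be registered in
# the suite's conftest):
#
#   @is_pt_tf_cross_test
#   def test_pt_tf_model_equivalence(self):
#       ...  # compare PyTorch and TensorFlow outputs for the same checkpoint
#
# selecting only these tests: pytest -m is_pt_tf_cross_test tests/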
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and can be run exclusively by setting the RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip("test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip("test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_tool_test(test_case):
"""
Decorator marking a test as a tool test. If RUN_TOOL_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_tool_tests:
return unittest.skip("test is a tool test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_tool_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
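# Illustrative usage (a minimal sketch, not part of the original module; the test
# class and method names are hypothetical):
#
#   class MyModelIntegrationTest(unittest.TestCase):
#       @slow
#       def test_full_generation(self):
#           ...  # heavyweight checkpoint download and generation
#
# enabled with: RUN_SLOW=1 pytest tests/ -k test_full_generation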
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_accelerate(test_case):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(is_accelerate_available(), "test requires accelerate")(test_case)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_available(), "test requires Flash Attention")(test_case)
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when they aren't
installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
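# Illustrative combination of the hardware markers (a minimal sketch, not part of the
# original module; the test name is hypothetical):
#
#   @slow
#   @require_torch_multi_gpu
#   def test_model_parallel_forward(self):
#       ...  # exercises a code path that needs at least two GPUs
#
# run with: RUN_SLOW=1 pytest -sv ./tests -k "multi_gpu"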
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_tpu_available(check_device=False), "test requires PyTorch TPU")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip("test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU and IPEX.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires IPEX and an XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup with IPEX and at least one XPU device. These tests are
skipped on a machine without IPEX or multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip("test requires IPEX and atleast one XPU device")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile.
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_fairscale(test_case):
"""
Decorator marking a test that requires fairscale
"""
return unittest.skipUnless(is_fairscale_available(), "test requires fairscale")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator for bits and bytes (bnb) dependency
"""
return unittest.skipUnless(is_bitsandbytes_available(), "test requires bnb")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires Cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if it is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on the context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporarily add the given path to `sys.path`.
Usage:
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests we often need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debugging when you want to monitor a specific directory and make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows this::

@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
    run_slow = os.getenv("RUN_SLOW", False)
    use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
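# Illustrative usage of the context manager above (a minimal sketch; the variable
# names are hypothetical):
#
#   with mockenv_context("OLD_FLAG", NEW_FLAG="1"):
#       ...  # os.environ has NEW_FLAG set and OLD_FLAG removed here
#   # on exit the original environment is restored in-place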
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and, while unlikely, it could break should pytest make internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
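# Illustrative usage (a sketch; the launcher command is abbreviated and hypothetical):
#
#   master_port = get_torch_dist_unique_port()
#   cmd = f"torchrun --nproc_per_node=2 --master_port={master_port} train.py".split()
#   execute_subprocess_async(cmd, env=self.get_env())
#
# under pytest-xdist each worker gets a different port, so concurrent distributed
# tests do not collide on the same master port.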
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(line) - len(line.lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
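# Illustrative usage (a minimal sketch; the script path is hypothetical):
#
#   stdout = run_command(["python", "examples/my_script.py", "--help"], return_stdout=True)
#
# a non-zero exit status surfaces as SubprocessCallException with the captured output
# embedded in the message, which keeps test failures readable.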
class RequestCounter:
"""
Helper class that will count all requests made online.
"""
def __enter__(self):
self.head_request_count = 0
self.get_request_count = 0
self.other_request_count = 0
# Mock `get_session` to count HTTP calls.
self.old_get_session = huggingface_hub.utils._http.get_session
self.session = requests.Session()
self.session.request = self.new_request
huggingface_hub.utils._http.get_session = lambda: self.session
return self
def __exit__(self, *args, **kwargs):
huggingface_hub.utils._http.get_session = self.old_get_session
def new_request(self, method, **kwargs):
if method == "GET":
self.get_request_count += 1
elif method == "HEAD":
self.head_request_count += 1
else:
self.other_request_count += 1
return requests.request(method=method, **kwargs)
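# Illustrative usage (a sketch; only calls that go through huggingface_hub's shared
# `get_session()` are counted):
#
#   with RequestCounter() as counter:
#       _ = huggingface_hub.model_info(SMALL_MODEL_IDENTIFIER)
#   print(counter.head_request_count, counter.get_request_count, counter.other_request_count)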
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
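# Illustrative usage of the retry decorator above (a minimal sketch; the test name and
# description are hypothetical):
#
#   @is_flaky(max_attempts=3, wait_before_retry=1.0, description="occasional hub timeout")
#   def test_sometimes_times_out(self):
#       ...
#
# note that the final attempt runs outside the try/except, so a persistent failure is
# still reported normally by the test runner.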
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_methohd = "spawn"
ctx = multiprocessing.get_context(start_methohd)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA-related code is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
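# Worked example of the preprocessing above (a sketch; the snippets are made up):
#
#   ">>> ds = load_dataset('some/dataset')"
# becomes
#   ">>> ds = load_dataset('some/dataset')  # doctest: +IGNORE_RESULT"
#
# and if `skip_cuda_tests` is True, a code block matching the cuda heuristic
# (e.g. containing `.to("cuda")`) makes the whole string collapse to "", so no
# doctest is collected for it.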
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the `HfDocTestParser` is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
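# Hedged note (assumption about the conftest.py side, not defined in this module): a typical way
# to activate the two classes above is to patch them in from the repository's `conftest.py`,
# roughly:
#
#     import doctest
#     import _pytest.doctest
#     from transformers.testing_utils import HfDocTestParser, HfDoctestModule
#
#     doctest.DocTestParser = HfDocTestParser
#     _pytest.doctest.DoctestModule = HfDoctestModule
#
# so that `pytest --doctest-modules` / `--doctest-glob` picks up the HF-specific parsing.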
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
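# Minimal sketch of the two env helpers above (the variable names are made up and cleaned up afterwards):
def _example_env_parsing():
    os.environ["MY_FAKE_BOOL_FLAG"] = "yes"
    os.environ["MY_FAKE_INT_LIMIT"] = "8"
    try:
        assert parse_flag_from_env("MY_FAKE_BOOL_FLAG", default=False)  # truthy
        assert parse_int_from_env("MY_FAKE_INT_LIMIT", default=None) == 8
        assert parse_int_from_env("MY_FAKE_MISSING_KEY", default=2) == 2  # falls back to the default
    finally:
        os.environ.pop("MY_FAKE_BOOL_FLAG", None)
        os.environ.pop("MY_FAKE_INT_LIMIT", None)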
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as one that tests the interactions between PyTorch and TensorFlow.
PT+TF tests are skipped when the RUN_PT_TF_CROSS_TESTS environment variable is set to a falsy value; they can be
run on their own by selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as one that tests the interactions between PyTorch and Flax.
PT+FLAX tests are skipped when the RUN_PT_FLAX_CROSS_TESTS environment variable is set to a falsy value; they can
be run on their own by selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
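# Hedged usage sketch (hypothetical test, not part of the original file): the markers compose
# with plain `unittest` cases, so this test only runs when RUN_SLOW is set to a truthy value.
class _ExampleSlowTest(unittest.TestCase):
    @slow
    def test_marked_slow(self):
        self.assertTrue(True)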
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that need to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch not TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or its version does not match the
current PyTorch version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one or both are not installed.
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
    test_case
)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(
torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(
torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile.
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if it is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
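# Hedged sketch of `get_tests_dir` as it would be called from a file living under `tests/`
# (the fixture name is made up); calling it from outside the tests tree would not find a
# `tests` parent directory.
def _example_get_tests_dir():
    fixture = get_tests_dir("fixtures/dummy.json")
    assert fixture.endswith("dummy.json")  # resolves to <repo>/tests/fixtures/dummy.json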
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out == msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporarily add the given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debugging when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
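# Hedged sketch (hypothetical test case): leaning on the TestCasePlus helpers; the tmp dir
# registered via `get_auto_remove_tmp_dir` is deleted automatically in `tearDown`.
class _ExampleTestCasePlusUsage(TestCasePlus):
    def test_scratch_dir(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        (Path(tmp_dir) / "artifact.txt").write_text("scratch data")
        self.assertTrue((Path(tmp_dir) / "artifact.txt").exists())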
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows this::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
    run_slow = os.getenv("RUN_SLOW", False)
    use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to `mockenv`.
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
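# Minimal sketch of `mockenv_context` (the variable name is made up and assumed not to be set
# already): the update is visible inside the `with` block and rolled back on exit.
def _example_mockenv_context():
    with mockenv_context(MY_FAKE_ENV_VAR="1"):
        assert os.environ["MY_FAKE_ENV_VAR"] == "1"
    assert "MY_FAKE_ENV_VAR" not in os.environ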
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
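# Hedged sketch (assumption about the conftest.py side, not defined here): the shared option
# above and `pytest_terminal_summary_main` below are typically wired up roughly like
#
#     # conftest.py
#     from transformers.testing_utils import pytest_addoption_shared, pytest_terminal_summary_main
#
#     def pytest_addoption(parser):
#         pytest_addoption_shared(parser)
#
#     def pytest_terminal_summary(terminalreporter):
#         make_reports = terminalreporter.config.getoption("--make-reports")
#         if make_reports:
#             pytest_terminal_summary_main(terminalreporter, id=make_reports)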
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
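# A minimal sketch of the matching `conftest.py` hook wrapper; the `--make-reports`
# option registered above supplies the report-name prefix (the import path is an assumption):
#
#     from transformers.testing_utils import pytest_terminal_summary_main
#
#     def pytest_terminal_summary(terminalreporter):
#         make_reports = terminalreporter.config.getoption("--make-reports")
#         if make_reports:
#             pytest_terminal_summary_main(terminalreporter, id=make_reports)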
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
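# Hypothetical usage sketch (the command is made up for illustration); `result.stdout`
# and `result.stderr` are lists of already-decoded, rstripped lines:
#
#     result = execute_subprocess_async(["python", "-c", "print('hello from subprocess')"], env=os.environ.copy())
#     assert result.stdout[0] == "hello from subprocess"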
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
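# Hypothetical sketch of how the unique port is typically fed to a distributed launcher
# (script name and worker count are made up):
#
#     master_port = get_torch_dist_unique_port()
#     cmd = f"python -m torch.distributed.run --nproc_per_node=2 --master_port={master_port} train.py".split()
#     execute_subprocess_async(cmd, env=os.environ.copy())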
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
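# Example of what `nested_simplify` produces (floats rounded to `decimals`, containers
# recursed into), handy for assertEqual-style comparisons against expected values:
#
#     nested_simplify({"score": 0.987654, "ids": [1, 2]}, decimals=3)
#     # -> {"score": 0.988, "ids": [1, 2]}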
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
        # each key on one line, indent should be 2, min length is 3
        assert lines[0].strip() == "{"
        for line in lines[1:-1]:
            left_indent = len(line) - len(line.lstrip())
            assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
    if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
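# Hypothetical usage sketch (the command is only illustrative):
#
#     stdout = run_command(["python", "-c", "print('ok')"], return_stdout=True)
#     assert "ok" in stdout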
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
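# Hypothetical usage sketch of `is_flaky` on a test method:
#
#     class SomeIntegrationTest(unittest.TestCase):
#         @is_flaky(max_attempts=3, wait_before_retry=1.0, description="remote endpoint is occasionally slow")
#         def test_remote_call(self):
#             ...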
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
    start_method = "spawn"
    ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
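# Hypothetical usage sketch: the target function receives (input_queue, output_queue, timeout),
# reads its inputs, and must put a dict with an "error" key on the output queue
# (assumes `import traceback` and that `_target` lives at module level so it can be pickled):
#
#     def _target(in_queue, out_queue, timeout):
#         error = None
#         try:
#             inputs = in_queue.get(timeout=timeout)
#             assert inputs["x"] + 1 == 2
#         except Exception:
#             error = traceback.format_exc()
#         out_queue.put({"error": error}, timeout=timeout)
#         out_queue.join()
#
#     class SomeTest(unittest.TestCase):
#         def test_in_subprocess(self):
#             run_test_in_subprocess(self, _target, inputs={"x": 1})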
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
    its docstring. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
    CUDA-related code is detected (with a heuristic), this method will return an empty string so no doctest will be run
    for `string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
    Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# instead at user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
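# Hypothetical usage sketch inside a test (`torch_device` is assumed to be the
# module-level device string used elsewhere in this module):
#
#     backend_manual_seed(torch_device, 0)
#     if backend_device_count(torch_device) < 2:
#         self.skipTest("needs at least 2 devices")
#     backend_empty_cache(torch_device)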
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
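# A minimal sketch of what a `TRANSFORMERS_TEST_DEVICE_SPEC` file could contain; the
# file name, device name and function bodies are hypothetical - only `DEVICE_NAME` and
# the `*_FN` attribute names consumed above matter:
#
#     # my_device_spec.py
#     import torch
#
#     DEVICE_NAME = "cpu"
#
#     def MANUAL_SEED_FN(seed):
#         torch.manual_seed(seed)
#
#     EMPTY_CACHE_FN = None
#     DEVICE_COUNT_FN = lambda: 1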
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
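# Example of how these parsers behave (flag values go through `strtobool`, so anything
# other than yes/no-style strings raises; int values go through `int()`):
#
#     os.environ["RUN_SLOW"] = "yes"
#     assert parse_flag_from_env("RUN_SLOW", default=False)         # truthy
#     assert parse_int_from_env("SOME_UNSET_VAR", default=8) == 8   # unset -> default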
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env("RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_TOOL_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
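# Hypothetical usage sketch: the skip decorators defined in this module compose on
# test classes or methods like regular unittest skip decorators:
#
#     @slow
#     @require_torch
#     class SomeModelIntegrationTest(unittest.TestCase):
#         def test_forward(self):
#             ...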
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
    Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
    Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
    A decorator that loads the HF token for tests that need to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch not TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
    Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
    Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
    Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
    return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
        test_case
    )
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
    must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
    return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
    Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
    joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
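# Hypothetical usage sketch (the fixture path is made up):
#
#     SAMPLE_VOCAB = get_tests_dir("fixtures/dummy_vocab.json")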
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
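# Minimal usage sketch (not part of the library); the sample string below is a made-up
# example of progress-bar style output.
def _demo_apply_print_resets():
    # transient frames ending in "\r" are dropped, leaving just the final message
    sample = "downloading 10%\rdownloading 90%\rdownload complete"
    assert apply_print_resets(sample) == "download complete"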
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporarily add the given path to `sys.path`.
Usage:
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests, we often need to know where things are relative to the current test file, and that's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all fully resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError("/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper, that allows this ::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
    run_slow = os.getenv("RUN_SLOW", False)
    use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
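# Minimal usage sketch (not part of the library); the variable names are made up and the
# last assertion assumes DEMO_FLAG was not already set in the surrounding environment.
def _demo_mockenv_context():
    with mockenv_context("UNWANTED_VAR", DEMO_FLAG="1"):
        assert os.environ.get("DEMO_FLAG") == "1"
        assert "UNWANTED_VAR" not in os.environ
    # on exit the added variable is removed again and any removed ones are restored
    assert "DEMO_FLAG" not in os.environ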
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this functions taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
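# Illustrative sketch (not part of this module): the thin wrappers a `conftest.py` would
# define so the two helpers above hook into pytest. The option name matches
# `pytest_addoption_shared`; the wrapper bodies are an assumption.
#
#   def pytest_addoption(parser):
#       pytest_addoption_shared(parser)
#
#   def pytest_terminal_summary(terminalreporter):
#       make_reports = terminalreporter.config.getoption("--make-reports")
#       if make_reports:
#           pytest_terminal_summary_main(terminalreporter, id=make_reports)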
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key on its own line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
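# Minimal usage sketch (not part of the library): retry an occasionally failing check up to
# three times with a short pause between attempts; the decorated body is a dummy.
@is_flaky(max_attempts=3, wait_before_retry=0.1, description="illustrative example only")
def _demo_flaky_check():
    assert True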
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_method = "spawn"
ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
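# Illustrative sketch (not part of the library) of the shape a `target_func` is assumed to
# have: it reads its inputs from the input queue, runs the checks, and reports an "error"
# entry (None on success) through the joinable output queue, which the parent inspects.
def _demo_target_func(in_queue, out_queue, timeout):
    error = None
    try:
        inputs = in_queue.get(timeout=timeout)
        _ = inputs  # the real checks would go here
    except Exception as e:
        error = str(e)
    out_queue.put({"error": error}, timeout=timeout)
    out_queue.join()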
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA usage is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
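# Illustrative before/after (not part of the library) for the rewrite performed above; the
# dataset name is a made-up placeholder.
#
#   before: >>> ds = load_dataset("dummy_dataset")
#   after:  >>> ds = load_dataset("dummy_dataset") # doctest: +IGNORE_RESULT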
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# here instead of at the user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}'"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(f"Provided device spec file was not a Python file! Received '{device_spec_path}'") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError("Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
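# Illustrative sketch (not part of the library): what a minimal spec file pointed to by
# `TRANSFORMERS_TEST_DEVICE_SPEC` could look like. `my_backend` and the right-hand side
# functions are made-up placeholders; only the attribute names on the left are what the
# loader above looks for.
#
#   # my_device_spec.py
#   import my_backend
#
#   DEVICE_NAME = "my_device"
#   MANUAL_SEED_FN = my_backend.manual_seed
#   EMPTY_CACHE_FN = my_backend.empty_cache
#   DEVICE_COUNT_FN = my_backend.device_count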
| 2024-09-07 07:24:08.651007: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-09-07 07:24:08.664975: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
2024-09-07 07:24:08.681023: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
2024-09-07 07:24:08.685845: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-09-07 07:24:08.698459: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-09-07 07:24:09.740366: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
ImportError while loading conftest '/workspace/test_repo/conftest.py'.
conftest.py:26: in <module>
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
src/transformers/testing_utils.py:54: in <module>
from .utils import (
E ImportError: cannot import name 'GGUF_MIN_VERSION' from 'transformers.utils' (/workspace/test_repo/src/transformers/utils/__init__.py)
| {'total': 17, 'passed': 6, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 3, 'warning': 8, 'error': 0} | {'total': 0, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | ./test_repo/src/transformers/testing_utils.py | ./test_repo/examples/tensorflow/test_tensorflow_examples.py |
2_transformers_callee__testing_utils__require_read_token__ad00c482c7fe9437c93bbc6be5a4a428c3219b5c | callee | fail-to-pass | https://github.com/huggingface/transformers | ad00c482c7fe9437c93bbc6be5a4a428c3219b5c | function | require_read_token | testing_utils.py | def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN", None)
@wraps(fn)
def _inner(*args, **kwargs):
with patch(huggingface_hub.utils._headers, "get_token", return_value=token):
return fn(*args, **kwargs)
return _inner
| def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
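# Illustrative usage sketch (not part of the snippet above): a test for a gated checkpoint
# opting in to the read token; the model id is a made-up placeholder.
#
#   @require_read_token
#   def test_gated_checkpoint(self):
#       AutoModel.from_pretrained("dummy-org/gated-model")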
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import huggingface_hub
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_essentia_available,
is_faiss_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torch_xpu_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
import_path,
)
from _pytest.outcomes import skip
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be an int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_tool_tests = parse_flag_from_env("RUN_TOOL_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
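# Illustrative invocation sketch (not part of the library): running only the cross-framework
# tests by combining the environment flag with the pytest marker registered above.
#
#   RUN_PT_TF_CROSS_TESTS=1 pytest -m is_pt_tf_cross_test tests/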
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip("test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip("test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_tool_test(test_case):
"""
Decorator marking a test as a tool test. If RUN_TOOL_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_tool_tests:
return unittest.skip("test is a tool test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_tool_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(is_accelerate_available(), "test requires accelerate")(test_case)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def _inner(*args, **kwargs):
with patch(huggingface_hub.utils._headers, "get_token", return_value=token):
return fn(*args, **kwargs)
return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one or both are not installed.
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
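# Usage sketch (hypothetical test, for illustration): this only runs on machines with more than one
# GPU and is reported as skipped everywhere else.
#
#     class ExampleParallelTest(unittest.TestCase):
#         @require_torch_multi_gpu
#         def test_tensor_on_second_gpu(self):
#             import torch
#
#             t = torch.ones(1).to("cuda:1")
#             self.assertEqual(str(t.device), "cuda:1")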
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(test_case)
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_tpu_available(check_device=False), "test requires PyTorch TPU")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip("test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU and IPEX.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires IPEX and an XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup with IPEX and at least one XPU device. These tests are
skipped on a machine without IPEX or multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip("test requires IPEX and atleast one XPU device")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(
torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(
torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip("test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
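# Usage sketch (the fixture paths are hypothetical): resolving files relative to the `tests` dir so a
# test can be launched from any working directory.
#
#     SAMPLE_WAV = get_tests_dir("fixtures/test_audio.wav")
#     CONFIG_PATH = get_tests_dir("fixtures/dummy_config.json")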
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out == msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debugging when you want to monitor a specific directory and want to make sure the previous
tests didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
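# Usage sketch (inside a `TestCasePlus` subclass; `model` is hypothetical): the unique dir is wiped in
# tearDown, while the explicit "./xxx" dir is emptied before the test and kept afterwards for debugging.
#
#     def test_save_and_reload(self):
#         tmp_dir = self.get_auto_remove_tmp_dir()
#         debug_dir = self.get_auto_remove_tmp_dir("./xxx")
#         model.save_pretrained(tmp_dir)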
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows this::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
run_slow = os.getenv("RUN_SLOW", False)
use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
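# Usage sketch (hypothetical test function): the variables exist only for the duration of the call,
# and values should be strings since they end up in `os.environ`.
#
#     @mockenv(RUN_SLOW="1", TRANSFORMERS_VERBOSITY="error")
#     def test_reads_env(self):
#         assert os.environ["RUN_SLOW"] == "1"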
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
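# Usage sketch: temporarily add and remove variables around a block of code rather than a whole test
# function; `os.environ` is restored on exit (the port values here are arbitrary).
#
#     with mockenv_context("WANDB_DISABLED", MASTER_ADDR="localhost", MASTER_PORT="10999"):
#         ...  # code here sees MASTER_ADDR/MASTER_PORT and no WANDB_DISABLED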
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and, while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
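# Usage sketch: running a child process from a test and asserting on its captured output; a non-zero
# return code or a completely silent run raises a RuntimeError.
#
#     result = execute_subprocess_async(["python", "-c", "print('hello from the child')"], env=os.environ.copy())
#     assert "hello from the child" in result.stdout[0]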
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
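# Usage sketch (the script name is hypothetical): each pytest-xdist worker gets its own master port, so
# concurrent distributed tests don't collide (worker gw3 -> 29503, a plain run -> 29500).
#
#     cmd = ["torchrun", f"--master_port={get_torch_dist_unique_port()}", "--nproc_per_node=2", "train_script.py"]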
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(line) - len(line.lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
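# Usage sketch (hypothetical test): retry a test that fails intermittently for reasons outside our
# control, waiting a couple of seconds between attempts.
#
#     @is_flaky(max_attempts=3, wait_before_retry=2.0, description="occasionally flaky remote hub call")
#     def test_push_to_hub(self):
#         ...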
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_methohd = "spawn"
ctx = multiprocessing.get_context(start_methohd)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
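# Usage sketch (hypothetical target function): the target reads its inputs from the input queue and
# puts a dict with an "error" key on the output queue; the parent fails the test if "error" is set.
# `traceback` would need to be imported where the target is defined.
#
#     def _target_check_sum(in_queue, out_queue, timeout):
#         error = None
#         try:
#             inputs = in_queue.get(timeout=timeout)
#             assert inputs["a"] + inputs["b"] == 4
#         except Exception:
#             error = traceback.format_exc()
#         out_queue.put({"error": error}, timeout=timeout)
#
#     def test_sum_in_subprocess(self):
#         run_test_in_subprocess(test_case=self, target_func=_target_check_sum, inputs={"a": 1, "b": 3})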
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA-related code is detected (with a heuristic), this method will return an empty string so no doctest will be
run for `string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# instead at user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
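# Usage sketch: these helpers keep tests device-agnostic, so the same call works on CUDA, CPU, or a
# third-party backend registered via `TRANSFORMERS_TEST_DEVICE_SPEC`.
#
#     backend_manual_seed(torch_device, 42)   # torch.cuda.manual_seed on "cuda", torch.manual_seed otherwise
#     n_devices = backend_device_count(torch_device)  # 0 on "cpu", torch.cuda.device_count() on "cuda"
#     backend_empty_cache(torch_device)       # no-op on "cpu"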
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import huggingface_hub
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_essentia_available,
is_faiss_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torch_xpu_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
import_path,
)
from _pytest.outcomes import skip
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
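# A minimal illustration of the two env helpers above (the values are assumptions for the example):
#
#     os.environ["RUN_SLOW"] = "yes"
#     assert parse_flag_from_env("RUN_SLOW", default=False)                    # truthy once the flag is set
#     assert parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None) is None   # unset -> default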
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_tool_tests = parse_flag_from_env("RUN_TOOL_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip("test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip("test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_tool_test(test_case):
"""
Decorator marking a test as a tool test. If RUN_TOOL_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_tool_tests:
return unittest.skip("test is a tool test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_tool_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
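# A small sketch of how these markers compose on a test case; the class and test names are
# hypothetical and only meant to show typical usage:
#
#     class ExampleModelTest(unittest.TestCase):
#         @slow
#         def test_full_checkpoint(self):
#             ...  # runs only when RUN_SLOW is truthy
#
#         @is_pt_tf_cross_test
#         def test_pt_tf_equivalence(self):
#             ...  # needs torch, tf and a truthy RUN_PT_TF_CROSS_TESTS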
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(is_accelerate_available(), "test requires accelerate")(test_case)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
    Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN", None)
@wraps(fn)
def _inner(*args, **kwargs):
        with patch.object(huggingface_hub.utils._headers, "get_token", return_value=token):
return fn(*args, **kwargs)
return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
    These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
    Decorator marking a test that requires JAX & Flax. These tests are skipped when one or both are not installed.
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_seqio(test_case):
"""
    Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
    Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
    Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
    return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
        test_case
    )
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_tpu_available(check_device=False), "test requires PyTorch TPU")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip("test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU and IPEX.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires IPEX and an XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
    Decorator marking a test that requires a multi-XPU setup with IPEX and at least one XPU device. These tests are
skipped on a machine without IPEX or multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip("test requires IPEX and atleast one XPU device")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(
torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(
torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
    Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip("test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
    return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
    Decorator marking a test that requires Cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
    joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
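# Illustrative call (the appended path is an assumption for the example): from a test file living
# anywhere under `tests/`, the following resolves to "<repo_root>/tests/fixtures".
#
#     fixtures_dir = get_tests_dir("fixtures")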
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
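# Quick illustration of the `\r` handling described above:
#
#     assert apply_print_resets("step 1\rstep 2\rfinal message") == "final message"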
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this is a not wanted behavior and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
    >>> assert cl.out == msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
    this is a convenience wrapper, that allows this::

        @mockenv(RUN_SLOW=True, USE_TF=False)
        def test_something():
            run_slow = os.getenv("RUN_SLOW", False)
            use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
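# A minimal sketch of `mockenv_context` usage (the variable names are illustrative only):
#
#     with mockenv_context("HF_HOME", TRANSFORMERS_OFFLINE="1"):
#         ...  # HF_HOME is removed and TRANSFORMERS_OFFLINE == "1" inside the block
#     # the previous values are restored here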
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this functions taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
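# A hedged usage sketch from inside a `TestCasePlus` test; the script path and worker count are
# assumptions for the example, not a fixed recipe:
#
#     cmd = (
#         f"python -m torch.distributed.run --nproc_per_node=2 "
#         f"--master_port={get_torch_dist_unique_port()} ./examples/run_dummy.py"
#     ).split()
#     execute_subprocess_async(cmd, env=self.get_env())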
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
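# Example: under `pytest -n 4`, worker "gw2" gives `pytest_xdist_worker_id() == 2`, so
# `get_torch_dist_unique_port()` returns 29502 and concurrent workers don't share a port.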
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
        # each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
            left_indent = len(line) - len(line.lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
    if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
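# Illustrative usage sketch (not part of the original module; the command below is hypothetical):
#
#     out = run_command([sys.executable, "-c", "print('ok')"], return_stdout=True)
#     assert "ok" in out
#
# A command that exits with a non-zero status raises `SubprocessCallException` carrying the combined
# stdout/stderr captured via `subprocess.STDOUT`.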
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
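# Illustrative usage sketch (the test name and reason below are hypothetical, not from the original module):
#
#     @is_flaky(max_attempts=3, wait_before_retry=1.0, description="intermittent hub timeout, see tracker")
#     def test_occasionally_failing_download(self):
#         ...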
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
    To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
    start_method = "spawn"
    ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
    # Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
    # the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
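# Sketch of the `target_func` contract inferred from the queue handling above (the function and its checks
# are hypothetical): read the inputs, run the assertions, and always report a dict with an "error" key back
# on the output queue.
#
#     def _target_func(in_queue, out_queue, timeout):
#         error = None
#         try:
#             inputs = in_queue.get(timeout=timeout)
#             assert inputs["x"] == 1  # the actual testing logic goes here
#         except Exception:
#             import traceback
#             error = traceback.format_exc()
#         out_queue.put({"error": error}, timeout=timeout)
#         out_queue.join()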
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
    its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
    CUDA-related code is detected (via a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
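# Illustrative example (not part of the original module; the dataset name is hypothetical): a doctest line
# such as
#
#     >>> ds = load_dataset("some/dataset")
#
# is rewritten on the fly to
#
#     >>> ds = load_dataset("some/dataset")  # doctest: +IGNORE_RESULT
#
# and, when `skip_cuda_tests` is true, any code block matching `cuda`, `to(0)` or `device=0` makes the
# function return an empty string so the whole docstring is skipped.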
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
    Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
    # Some entries in the dispatch table are deliberately mapped to `None` (no-op for that device);
    # guard against calling them here instead of pushing that check to the user level.
if fn is None:
return None
return fn(*args, **kwargs)
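# Illustrative example with a hypothetical dispatch table (not part of the original module):
#
#     _example_table = {"cuda": lambda: "cuda path", "cpu": None, "default": lambda: "generic path"}
#     _device_agnostic_dispatch("cuda", _example_table)  # -> "cuda path"
#     _device_agnostic_dispatch("cpu", _example_table)   # -> None (explicit no-op entry)
#     _device_agnostic_dispatch("mps", _example_table)   # -> "generic path" (falls back to "default")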
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
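# Illustrative example (hypothetical environment, not part of the original module):
#
#     os.environ["RUN_SLOW"] = "yes"
#     parse_flag_from_env("RUN_SLOW", default=False)           # -> truthy (via strtobool)
#     parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)  # -> None when the variable is unset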
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
    Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
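# Illustrative usage sketch (the test below is hypothetical, not from the original module):
#
#     @slow
#     def test_full_model_integration(self):
#         ...  # only run when RUN_SLOW is set to a truthy value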
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
    Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
    Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
    """A decorator that loads the HF token for tests that load gated models (reconstructed wrapper; reading the token from `HF_HUB_READ_TOKEN` is an assumption)."""
    token = os.getenv("HF_HUB_READ_TOKEN")
    @wraps(fn)
    def _inner(*args, **kwargs):
        if token is not None:
            with patch("huggingface_hub.utils._headers.get_token", return_value=token):
                return fn(*args, **kwargs)
        else:  # Allow running locally with the default token env variable
            return fn(*args, **kwargs)
    return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
    These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
    These tests are skipped when Intel Extension for PyTorch isn't installed or its version does not match the
    current PyTorch version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
    Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
    Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
    Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
    installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
    return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
        test_case
    )
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
    PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
    must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(
torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(
torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
    Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
    return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
    Decorator marking a test that requires Cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
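# Illustrative usage sketch (the fixture path below is hypothetical, not from the original module):
#
#     SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
#
# which resolves to `<repo_root>/tests/fixtures/test_sentencepiece.model` no matter which directory the
# test suite was launched from.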
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
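# Illustrative example (not part of the original module): progress-bar style output collapses down to the
# final message only, e.g.
#
#     apply_print_resets("downloading  10%\rdownloading 100%\rdone")  # -> "done"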
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
    assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
            doing. If this is not the desired behavior and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
    Temporarily add the given path to `sys.path`.
    Usage:
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all fully resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
This is a convenience wrapper that allows this ::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
    run_slow = os.getenv("RUN_SLOW", False)
    use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
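# A minimal usage sketch for `mockenv_context`; the variable names below are hypothetical and only
# illustrate the remove/update semantics:
def _mockenv_context_example():
    with mockenv_context("HYPOTHETICAL_VAR_TO_UNSET", HYPOTHETICAL_VAR_TO_SET="1"):
        assert "HYPOTHETICAL_VAR_TO_UNSET" not in os.environ
        assert os.environ["HYPOTHETICAL_VAR_TO_SET"] == "1"
    # the original environment is restored on exit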
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
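# Sketch of the expected `conftest.py` wrapper (kept as a comment because pytest hooks must be
# defined in `conftest.py`, not in this module):
#
# def pytest_addoption(parser):
#     pytest_addoption_shared(parser)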
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
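# Sketch of the corresponding `conftest.py` wrapper (as a comment, since pytest hooks must live in
# `conftest.py`); how the `--make-reports` value is read here is an assumption based on the option
# registered in `pytest_addoption_shared` above:
#
# def pytest_terminal_summary(terminalreporter):
#     make_reports = terminalreporter.config.getoption("--make-reports")
#     if make_reports:
#         pytest_terminal_summary_main(terminalreporter, id=make_reports)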
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
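# A hedged usage sketch: the unique port is typically interpolated into a distributed launch command
# executed from a `TestCasePlus` test (the training script name below is hypothetical):
#
# cmd = (
#     f"python -m torch.distributed.launch --nproc_per_node=2 "
#     f"--master_port={get_torch_dist_unique_port()} some_training_script.py"
# ).split()
# execute_subprocess_async(cmd, env=self.get_env())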
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(line) - len(line.lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
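# Quick illustration of `to_2tuple`: scalars are duplicated, iterables are passed through unchanged.
def _to_2tuple_example():
    assert to_2tuple(7) == (7, 7)
    assert to_2tuple((3, 5)) == (3, 5)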
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
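# A minimal sketch of how `run_command` can be used; the one-liner below is arbitrary and only
# exercises the stdout-capturing path:
def _run_command_example():
    stdout = run_command([sys.executable, "-c", "print('ok')"], return_stdout=True)
    assert "ok" in stdout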
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
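# A hedged usage sketch (the test below is hypothetical and only shows where the decorator goes):
#
# @is_flaky(max_attempts=3, wait_before_retry=1.0, description="intermittent hub timeouts")
# def test_something_occasionally_failing(self):
#     ...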
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_method = "spawn"
ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstring. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA-related code is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# instead at user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
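# A hedged usage sketch of the device-agnostic helpers (assumes `torch_device` has been resolved as
# done elsewhere in this module):
#
# backend_manual_seed(torch_device, 0)   # seeds CUDA or CPU uniformly, or a registered third-party backend
# backend_empty_cache(torch_device)      # no-op on CPU, `torch.cuda.empty_cache()` on CUDA
# n_devices = backend_device_count(torch_device)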
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
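# A hedged sketch of what a `TRANSFORMERS_TEST_DEVICE_SPEC` file could look like; the backend package
# and callables are illustrative, only `DEVICE_NAME` and the `*_FN` attributes read above are required:
#
# # my_device_spec.py
# import my_backend  # hypothetical PyTorch backend package
# DEVICE_NAME = "my_device"
# MANUAL_SEED_FN = my_backend.manual_seed
# EMPTY_CACHE_FN = my_backend.empty_cache
# DEVICE_COUNT_FN = my_backend.device_count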
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env("RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")
(test_case)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
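The block above resolves `torch_device` from the `TRANSFORMERS_TEST_BACKEND` and `TRANSFORMERS_TEST_DEVICE` environment variables at import time, so both must be set before `transformers.testing_utils` is imported. A minimal, purely illustrative sketch (the `torch_npu` backend module and the `npu` device string are assumptions about a third-party install):

```python
# Illustrative only: pick the test device via env vars before importing testing_utils.
import os

os.environ["TRANSFORMERS_TEST_BACKEND"] = "torch_npu"  # extra backend module to import (assumed installed)
os.environ["TRANSFORMERS_TEST_DEVICE"] = "npu"         # device string validated via torch.device()

from transformers.testing_utils import torch_device

print(torch_device)  # -> "npu" if the backend imported cleanly and the device string is valid
```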
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be taken, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
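A tiny self-check of the carriage-return stripping described in the comment above (the buffer content is made up): only the text after the last `\r` on each line survives.

```python
buf = "downloading  10%\rdownloading  55%\rdownloading 100%\ndone\n"
assert apply_print_resets(buf) == "downloading 100%\ndone\n"
```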
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out == msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests we often need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
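A minimal sketch of the two calling styles described in the docstring above; the test names and the `./xxx` path are just placeholders.

```python
class ExampleTmpDirTest(TestCasePlus):
    def test_with_unique_tmp_dir(self):
        # unique dir: empty at the start, removed automatically at the end of the test
        tmp_dir = self.get_auto_remove_tmp_dir()

    def test_with_debug_tmp_dir(self):
        # fixed relative dir: cleared before the test, left on disk afterwards for inspection
        tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```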
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError("/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper, that allows this ::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
    run_slow = os.getenv("RUN_SLOW", False)
    use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
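A short usage sketch for `mockenv_context` (the environment variable names are illustrative): inside the `with` block the updates are visible to anything reading `os.environ`, and the previous values are restored on exit.

```python
with mockenv_context("SOME_VAR_TO_REMOVE", TRANSFORMERS_VERBOSITY="error"):
    assert os.environ.get("SOME_VAR_TO_REMOVE") is None      # removed for the duration of the block
    assert os.environ["TRANSFORMERS_VERBOSITY"] == "error"   # added/overridden for the duration
# the original environment is restored here
```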
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and, while unlikely, it could break should pytest make internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
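For context, a sketch of the `conftest.py` wiring these two helpers expect; treat it as an illustration of the hook names rather than the exact file shipped with the repository.

```python
# conftest.py (sketch)
from transformers.testing_utils import pytest_addoption_shared, pytest_terminal_summary_main


def pytest_addoption(parser):
    pytest_addoption_shared(parser)  # registers --make-reports once


def pytest_terminal_summary(terminalreporter):
    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
```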
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
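A hedged sketch of how `get_torch_dist_unique_port` typically combines with `TestCasePlus.get_env` and `execute_subprocess_async` to launch a distributed run from a test; the script path and arguments are hypothetical.

```python
class ExampleDistributedTest(TestCasePlus):
    def test_two_process_run(self):
        script = f"{self.examples_dir_str}/pytorch/text-classification/run_glue.py"  # hypothetical target
        cmd = [
            "torchrun",
            "--nproc_per_node=2",
            f"--master_port={get_torch_dist_unique_port()}",  # avoids port clashes under pytest-xdist
            script,
            "--output_dir",
            self.get_auto_remove_tmp_dir(),
        ]
        execute_subprocess_async(cmd, env=self.get_env())
```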
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
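A small illustration of `nested_simplify` with made-up values: floats are rounded to `decimals` (3 by default) and containers are walked recursively.

```python
assert nested_simplify({"score": 0.9987654, "ids": [1, 2, 3]}) == {"score": 0.999, "ids": [1, 2, 3]}
assert nested_simplify([0.5, (1.23456, "label")], decimals=2) == [0.5, (1.23, "label")]
```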
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
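A sketch of input that satisfies `check_json_file_has_correct_format`: a file written with `json.dump(..., indent=2)` (braces on their own lines, two-space indent), or a bare `{}` for an empty dict. The file content here is arbitrary.

```python
import json
import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump({"hidden_size": 32, "num_layers": 2}, f, indent=2)
    path = f.name

check_json_file_has_correct_format(path)  # passes: "{" / "}" on their own lines, 2-space indent
os.remove(path)
```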
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
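A minimal usage sketch for `is_flaky` (the deliberately random assertion is only there to make the example self-contained):

```python
import random


class ExampleFlakyTest(unittest.TestCase):
    @is_flaky(max_attempts=3, wait_before_retry=0.5, description="illustrative flaky check")
    def test_sometimes_fails(self):
        # retried up to 3 times before the failure is reported
        self.assertGreater(random.random(), 0.2)
```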
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_method = "spawn"
ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
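A sketch of the protocol a `target_func` is expected to follow: read the inputs from the input queue, run the checks, put a dict with an `"error"` key on the output queue (the parent fails the test when it is not `None`), then `join()` the joinable queue so the parent can consume the result. Names are illustrative.

```python
import traceback


def _example_target_func(in_queue, out_queue, timeout):
    error = None
    try:
        inputs = in_queue.get(timeout=timeout)  # whatever was passed as `inputs`
        # ... actual testing logic goes here, e.g. load a model and run assertions on `inputs` ...
    except Exception:
        error = f"{traceback.format_exc()}"
    out_queue.put({"error": error}, timeout=timeout)  # parent fails the test if error is not None
    out_queue.join()  # wait until the parent has consumed the result


# from inside a test method:
# run_test_in_subprocess(self, _example_target_func, inputs={"some_key": "some_value"})
```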
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA usage is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some backend entries are intentionally `None` (no function for that device), so guard against `None` here
# instead of at the user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
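A short illustration of the device-agnostic dispatch above, assuming `torch_device` was resolved earlier in this module: the same call works whether the backend is CUDA, CPU, or a third-party device registered via the spec-file block that follows.

```python
backend_manual_seed(torch_device, 42)     # torch.cuda.manual_seed on "cuda", torch.manual_seed otherwise
backend_empty_cache(torch_device)         # no-op on "cpu" (the table entry is None)
num_devices = backend_device_count(torch_device)
```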
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError("Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
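A hedged sketch of what a `TRANSFORMERS_TEST_DEVICE_SPEC` module could look like for the block above; the device name and the `torch.npu` calls are assumptions that only hold with a matching third-party backend installed.

```python
# my_device_spec.py (hypothetical), used as TRANSFORMERS_TEST_DEVICE_SPEC=my_device_spec.py
import torch

# Required: must match TRANSFORMERS_TEST_DEVICE if that variable is also set.
DEVICE_NAME = "npu"

# Optional callables picked up by update_mapping_from_spec(); missing ones fall back to "default".
MANUAL_SEED_FN = torch.npu.manual_seed    # assumes the torch_npu backend is installed
EMPTY_CACHE_FN = torch.npu.empty_cache
DEVICE_COUNT_FN = torch.npu.device_count
```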
| 2024-09-07 07:29:30.805736: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-09-07 07:29:30.820650: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
2024-09-07 07:29:30.836513: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
2024-09-07 07:29:30.841261: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-09-07 07:29:30.853851: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-09-07 07:29:31.949477: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
ImportError while loading conftest '/workspace/test_repo/conftest.py'.
conftest.py:26: in <module>
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
src/transformers/testing_utils.py:54: in <module>
from .utils import (
E ImportError: cannot import name 'GGUF_MIN_VERSION' from 'transformers.utils' (/workspace/test_repo/src/transformers/utils/__init__.py)
| {'total': 17, 'passed': 6, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 3, 'warning': 8, 'error': 0} | {'total': 0, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | ./test_repo/src/transformers/testing_utils.py | ./test_repo/examples/tensorflow/test_tensorflow_examples.py |
2_transformers_callee__testing_utils___inner__ad00c482c7fe9437c93bbc6be5a4a428c3219b5c | callee | fail-to-pass | https://github.com/huggingface/transformers | ad00c482c7fe9437c93bbc6be5a4a428c3219b5c | function | _inner | testing_utils.py | def _inner(*args, **kwargs):
with patch(huggingface_hub.utils._headers, "get_token", return_value=token):
return fn(*args, **kwargs)
| def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import huggingface_hub
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_essentia_available,
is_faiss_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torch_xpu_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
import_path,
)
from _pytest.outcomes import skip
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_tool_tests = parse_flag_from_env("RUN_TOOL_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip("test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip("test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_tool_test(test_case):
"""
Decorator marking a test as a tool test. If RUN_TOOL_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_tool_tests:
return unittest.skip("test is a tool test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_tool_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
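# Illustrative sketch: a hypothetical test marked as slow, so it only runs when the
# RUN_SLOW environment variable is truthy (e.g. `RUN_SLOW=1 pytest tests/...`).
#
#     class MyModelIntegrationTest(unittest.TestCase):
#         @slow
#         def test_full_generation(self):
#             ...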
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(is_accelerate_available(), "test requires accelerate")(test_case)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
    Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
    """
    A decorator that loads the HF token for tests that require loading gated models.
    """
    token = os.getenv("HF_HUB_READ_TOKEN", None)
    @wraps(fn)
    def _inner(*args, **kwargs):
        # Completed wrapper (patch target assumed): substitute huggingface_hub's token lookup so
        # gated-model downloads inside the wrapped test authenticate with HF_HUB_READ_TOKEN.
        with patch("huggingface_hub.utils._headers.get_token", return_value=token):
            return fn(*args, **kwargs)
    return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
    These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_seqio(test_case):
"""
    Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
    Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
    Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision dependencies aren't
installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
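# Illustrative sketch: a hypothetical test using the marker above; it is skipped unless
# PyTorch sees at least two CUDA devices, and can be selected with
# `pytest -sv ./tests -k "multi_gpu"` as noted in the docstring.
#
#     @require_torch_multi_gpu
#     def test_model_parallelism(self):
#         ...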
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
    return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
        test_case
    )
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_tpu_available(check_device=False), "test requires PyTorch TPU")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip("test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU and IPEX.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires IPEX and an XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
    Decorator marking a test that requires a multi-XPU setup with IPEX and at least one XPU device. These tests are
skipped on a machine without IPEX or multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip("test requires IPEX and atleast one XPU device")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(
torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(
torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
    Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip("test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
    return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
    Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if it is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
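# Illustrative sketch: resolving a fixture path relative to the `tests` directory so the
# test works regardless of where pytest is invoked from (the fixture name is hypothetical).
#
#     SAMPLE_VOCAB = get_tests_dir("fixtures/dummy_vocab.model")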
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
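# Illustrative sketch of what apply_print_resets does to progress-bar style output
# (strings made up): only the text after the last \r on each line survives.
#
#     buf = "downloading  10%\rdownloading  90%\rdone\n"
#     apply_print_resets(buf)        # -> "done\n"
#     assert_screenout(buf, "done")  # passes; would raise AssertionError if "done" were absent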
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
    doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
                If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
                `tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
                If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
    this is a convenience wrapper, that allows this ::
    @mockenv(RUN_SLOW=True, USE_TF=False)
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
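# Illustrative sketch: a hypothetical test using mockenv; the values should be strings,
# since os.environ only stores str.
#
#     @mockenv(RUN_SLOW="1", TRANSFORMERS_VERBOSITY="error")
#     def test_something(self):
#         assert os.environ["RUN_SLOW"] == "1"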
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
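# Illustrative sketch: the context-manager variant, handy when only part of a test needs
# the modified environment; keys passed positionally are removed for the scope.
#
#     with mockenv_context("CUDA_VISIBLE_DEVICES", WORLD_SIZE="2"):
#         ...  # WORLD_SIZE is "2" and CUDA_VISIBLE_DEVICES is unset in here
#     # the original os.environ is restored on exit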
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
    NB: this function taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
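# Illustrative sketch: launching a distributed run from a TestCasePlus test and letting
# execute_subprocess_async raise if the workers fail or produce no output at all
# (the script path and arguments below are hypothetical).
#
#     cmd = f"python -m torch.distributed.run --nproc_per_node=2 {script} --output_dir {tmp_dir}".split()
#     execute_subprocess_async(cmd, env=self.get_env())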
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
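# Illustrative sketch: under `pytest -n 4` the workers are named gw0..gw3, so
# PYTEST_XDIST_WORKER="gw3" yields 3; without pytest-xdist the env var is absent and the
# default "gw0" yields 0.
#
#     os.environ.get("PYTEST_XDIST_WORKER", "gw0")  # e.g. "gw3" -> pytest_xdist_worker_id() == 3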
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
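# Illustrative sketch: each concurrent xdist worker gets its own master port, so parallel
# `torch.distributed.launch` invocations don't collide (e.g. worker gw1 -> 29500 + 1 = 29501).
#
#     master_port = get_torch_dist_unique_port()
#     cmd = f"python -m torch.distributed.launch --master_port={master_port} ...".split()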
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key one line, ident should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
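# Illustrative sketch: to_2tuple normalizes scalar-or-pair arguments (e.g. image/patch sizes):
#
#     to_2tuple(224)         # -> (224, 224)
#     to_2tuple((224, 192))  # -> (224, 192), iterables pass through unchanged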
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
    if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
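# Illustrative sketch: running a command and asserting on its stdout; a non-zero exit code
# surfaces as SubprocessCallException carrying the captured output.
#
#     output = run_command(["python", "--version"], return_stdout=True)
#     assert "Python" in output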
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
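# Illustrative sketch: a hypothetical flaky test retried up to 3 times with a short pause
# between attempts.
#
#     @is_flaky(max_attempts=3, wait_before_retry=1.0, description="nondeterministic sampling")
#     def test_generate_with_sampling(self):
#         ...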
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
    To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_methohd = "spawn"
ctx = multiprocessing.get_context(start_methohd)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA-related code is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
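# Illustrative sketch (not part of the original module): the effect of `preprocess_string` on a
# snippet containing a `load_dataset(...)` call. The snippet below is hypothetical.
def _example_preprocess_string():
    snippet = (
        "```python\n"
        ">>> from datasets import load_dataset\n"
        '>>> ds = load_dataset("some/dataset")\n'
        "```"
    )
    # With `skip_cuda_tests=False`, the `load_dataset(...)` line gains a
    # `# doctest: +IGNORE_RESULT` marker so its printed output cannot fail the doctest.
    return preprocess_string(snippet, skip_cuda_tests=False)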
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values, while others may be mapped to `None` for a
# given device; in that case `None` is returned here and the caller has to guard against it.
if fn is None:
return None
return fn(*args, **kwargs)
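# Illustrative sketch (not part of the original module): how `_device_agnostic_dispatch`
# resolves a callable from a dispatch table. The table entries and device names are hypothetical.
def _example_device_dispatch():
    table = {
        "cpu": lambda: 0,  # explicit entry for "cpu"
        "default": lambda: 1,  # fallback used for any device not listed in the table
        "mock-accel": None,  # `None` entries short-circuit and return None
    }
    assert _device_agnostic_dispatch("cpu", table) == 0
    assert _device_agnostic_dispatch("unknown-device", table) == 1
    assert _device_agnostic_dispatch("mock-accel", table) is None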
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
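# Illustrative sketch (not part of the original module): the attributes a
# `TRANSFORMERS_TEST_DEVICE_SPEC` module is expected to expose, based on the lookups above.
# The device name and function bodies below are hypothetical.
#
#     # my_device_spec.py
#     DEVICE_NAME = "my-accelerator"
#
#     def MANUAL_SEED_FN(seed):
#         ...  # seed the backend's RNG
#
#     def EMPTY_CACHE_FN():
#         ...  # release cached memory, if the backend supports it
#
#     def DEVICE_COUNT_FN():
#         return 1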
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import huggingface_hub
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_essentia_available,
is_faiss_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torch_xpu_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
import_path,
)
from _pytest.outcomes import skip
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_tool_tests = parse_flag_from_env("RUN_TOOL_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that control interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that control interactions between PyTorch and Flax
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip("test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip("test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_tool_test(test_case):
"""
Decorator marking a test as a tool test. If RUN_TOOL_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_tool_tests:
return unittest.skip("test is a tool test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_tool_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(is_accelerate_available(), "test requires accelerate")(test_case)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
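# Illustrative sketch (not part of the original module): the `require_*` helpers and `slow`
# are meant to be stacked on test classes or methods. The test below is hypothetical.
def _example_require_decorators():
    import unittest

    @require_torch
    class _TorchOnlyTests(unittest.TestCase):  # hypothetical test case
        @slow
        def test_heavy_model(self):
            # Runs only when PyTorch is installed and RUN_SLOW is set to a truthy value.
            self.assertTrue(is_torch_available())

    return _TorchOnlyTests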
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN", None)
@wraps(fn)
def _inner(*args, **kwargs):
with patch.object(huggingface_hub.utils._headers, "get_token", return_value=token):
return fn(*args, **kwargs)
return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch not TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
    test_case
)
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_tpu_available(check_device=False), "test requires PyTorch TPU")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip("test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU and IPEX.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires IPEX and an XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup with IPEX and at least one XPU device. These tests are
skipped on a machine without IPEX or multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip("test requires IPEX and atleast one XPU device")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(
torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(
torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip("test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# the __file__ of this function's caller (i.e. the test file that invoked this helper)
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
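# Illustrative sketch (not part of the original module): typical use of `get_tests_dir` from
# inside a test file to locate fixture data. The subdirectory name below is hypothetical.
def _example_get_tests_dir_usage():
    fixtures_dir = get_tests_dir("fixtures")  # e.g. <repo_root>/tests/fixtures
    return os.path.isdir(fixtures_dir)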
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this is a not wanted behavior and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows this::

    @mockenv(RUN_SLOW=True, USE_TF=False)
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
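# Illustrative sketch (not part of the original module): `mockenv_context` updates `os.environ`
# in-place for the duration of the block and restores it afterwards. Variable names are hypothetical.
def _example_mockenv_context_usage():
    with mockenv_context("SOME_VAR_TO_REMOVE", EXAMPLE_FLAG="1"):
        assert os.environ.get("EXAMPLE_FLAG") == "1"
        assert "SOME_VAR_TO_REMOVE" not in os.environ
    # On exit, the previous values (or absence) of both variables are restored.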
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
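# A minimal sketch of the `conftest.py` wrapper mentioned in the docstring above (the import path assumes this
# module lives at `transformers.testing_utils`):
#
#     # conftest.py
#     def pytest_addoption(parser):
#         from transformers.testing_utils import pytest_addoption_shared
#
#         pytest_addoption_shared(parser)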
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this functions taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
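# A minimal sketch of the matching `conftest.py` hook (names follow the docstring above; the exact wiring may
# differ between the tests/ and examples/ conftest files):
#
#     # conftest.py
#     def pytest_terminal_summary(terminalreporter):
#         from transformers.testing_utils import pytest_terminal_summary_main
#
#         make_reports = terminalreporter.config.getoption("--make-reports")
#         if make_reports:
#             pytest_terminal_summary_main(terminalreporter, id=make_reports)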
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
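# Illustrative usage (the command is only an example; inside a TestCasePlus test one would typically pass
# `env=self.get_env()`):
#
#     cmd = ["python", "-c", "print('hello')"]
#     result = execute_subprocess_async(cmd)
#     assert "hello" in "\n".join(result.stdout)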
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
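# Illustrative usage when building a distributed launcher command (the script name is a placeholder):
#
#     master_port = get_torch_dist_unique_port()
#     cmd = f"torchrun --nproc_per_node=2 --master_port={master_port} some_distributed_test.py".split()
#     execute_subprocess_async(cmd)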
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
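# Example of what `nested_simplify` produces (values chosen for illustration):
#
#     nested_simplify({"a": [1.23456, 2.0], "b": (0.1, "x")})
#     # -> {"a": [1.235, 2.0], "b": (0.1, "x")}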
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
            # each key on its own line, indent should be 2, min length is 3
            assert lines[0].strip() == "{"
            for line in lines[1:-1]:
                left_indent = len(line) - len(line.lstrip())
                assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
    Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
    if an error occurred while running `command`.
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
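# Illustrative usage (assumes `python` is on PATH):
#
#     stdout = run_command(["python", "--version"], return_stdout=True)
#     assert stdout.startswith("Python")
#     # a failing command raises SubprocessCallException carrying the captured output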
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
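# Illustrative usage (test name and description are made up):
#
#     @is_flaky(max_attempts=3, wait_before_retry=1.0, description="sporadic connection resets, see issue tracker")
#     def test_something_occasionally_failing(self):
#         ...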
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
    start_method = "spawn"
    ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
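# A minimal sketch of a `target_func` compatible with the queue protocol above (requires `import traceback`;
# the body is purely illustrative):
#
#     def _test_target(in_queue, out_queue, timeout):
#         error = None
#         try:
#             inputs = in_queue.get(timeout=timeout)
#             # ... run the actual checks here with plain asserts ...
#         except Exception:
#             error = traceback.format_exc()
#         out_queue.put({"error": error}, timeout=timeout)
#         out_queue.join()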
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
    its docstring. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
    cuda-related code is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
    Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# instead at user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
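    # Illustrative use of the device-agnostic helpers in a test body (assumes `torch_device` is set up as done
    # elsewhere in this module):
    #
    #     backend_manual_seed(torch_device, 0)
    #     backend_empty_cache(torch_device)
    #     n_devices = backend_device_count(torch_device)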
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
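        # A minimal sketch of what a device spec file could look like (file name and function choices are purely
        # illustrative; only `DEVICE_NAME` is mandatory, the `*_FN` entries are optional overrides):
        #
        #     # my_device_spec.py
        #     import torch
        #
        #     DEVICE_NAME = "cpu"
        #     MANUAL_SEED_FN = torch.manual_seed
        #     EMPTY_CACHE_FN = None
        #     DEVICE_COUNT_FN = lambda: 1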
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
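# Illustrative invocations from the shell (the env var names are the real ones parsed below):
#
#     RUN_SLOW=yes pytest tests/ -k some_slow_test      # parse_flag_from_env("RUN_SLOW") -> truthy
#     TF_GPU_MEMORY_LIMIT=2048 pytest tests/            # parse_int_from_env("TF_GPU_MEMORY_LIMIT") -> 2048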
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test of the interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test of the interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_TOOL_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
    Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
    Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
    @wraps(fn)
    def _inner(*args, **kwargs):
        # run the wrapped test with the CI read token patched in so gated repos can be downloaded;
        # the patch target mirrors the upstream helper and may need adjusting if huggingface_hub moves `get_token`
        with patch("huggingface_hub.utils._headers.get_token", return_value=token):
            return fn(*args, **kwargs)

    return _inner
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch not TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
    Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
    Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
    Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
    installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when they aren't
    installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
    return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
        test_case
    )
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
    must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(
torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(
torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
    Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
    return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
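# Illustrative sketch (not one of the module's helpers): `get_gpu_count` can back a
# custom skip decorator, e.g. for tests that only make sense with two accelerators.
def _example_require_two_gpus(test_case):
    return unittest.skipUnless(get_gpu_count() >= 2, "test requires at least 2 GPUs")(test_case)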
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
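# Minimal usage sketch: from inside a test module under `tests/`, resolve a fixture
# path regardless of where pytest was started, e.g.
#   fixture = get_tests_dir("fixtures/some_fixture.json")  # hypothetical filename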
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out == msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows this ::

    @mockenv(RUN_SLOW=True, USE_TF=False)
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
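# Minimal sketch (hypothetical test function): the decorator patches `os.environ`
# only for the duration of the call; string values are used so os.environ accepts them.
@mockenv(RUN_SLOW="1", USE_TF="0")
def _example_reads_patched_env():
    return os.getenv("RUN_SLOW"), os.getenv("USE_TF")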
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
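# Minimal usage sketch: force offline mode and drop a variable for the duration of
# the block; both changes are reverted on exit. The variable names are illustrative.
def _example_with_temporary_env():
    with mockenv_context("HF_HOME", TRANSFORMERS_OFFLINE="1"):
        return os.getenv("TRANSFORMERS_OFFLINE"), os.getenv("HF_HOME")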
# --- pytest conf functions --- #
# to avoid multiple invocations from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this functions taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
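# Illustrative sketch: build a `torch.distributed.launch` command whose master port
# stays collision-free under `pytest-xdist`. The script name is a placeholder.
def _example_distributed_launch_cmd(script="run_placeholder.py", nproc_per_node=2):
    return shlex.split(
        f"python -m torch.distributed.launch --nproc_per_node={nproc_per_node} "
        f"--master_port={get_torch_dist_unique_port()} {script}"
    )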
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
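# Quick sketch of the intended use: rounding makes float comparisons stable in
# assertions regardless of backend or hardware.
def _example_nested_simplify():
    simplified = nested_simplify({"score": 0.123456, "ids": [1, 2]}, decimals=3)
    assert simplified == {"score": 0.123, "ids": [1, 2]}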
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key on its own line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
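# e.g. to_2tuple(224) returns (224, 224), while an iterable such as (224, 196) is
# returned unchanged.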
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
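# Minimal sketch: run a trivial interpreter call and capture its stdout; a failing
# command would surface as SubprocessCallException with the captured output.
def _example_run_command():
    return run_command([sys.executable, "-c", "print('ok')"], return_stdout=True)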
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
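# Minimal usage sketch (hypothetical test): retry a nondeterministic check up to 3
# times, waiting one second between attempts.
@is_flaky(max_attempts=3, wait_before_retry=1.0, description="nondeterministic sampling")
def _example_flaky_check():
    assert True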
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_method = "spawn"
ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
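# Sketch of a `target_func` compatible with `run_test_in_subprocess`: it reads its
# inputs from the input queue and must put a dict with an "error" key on the output
# queue. The body is a placeholder for real checks run in the child process.
def _example_subprocess_target(in_queue, out_queue, timeout):
    error = None
    try:
        inputs = in_queue.get(timeout=timeout)
        _ = inputs  # placeholder for the actual assertions
    except Exception as e:
        error = str(e)
    out_queue.put({"error": error}, timeout=timeout)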
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA usage is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
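# Minimal sketch: a plain CPU code block passes through `preprocess_string`
# unchanged when CUDA tests are skipped.
def _example_preprocess_string():
    block = "```python\n>>> 1 + 1\n2\n```"
    assert preprocess_string(block, skip_cuda_tests=True) == block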
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device-agnostic functions return values. Guard against `None` entries here
# instead of at the user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
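# Illustrative sketch: the backend_* helpers route through the dispatch tables, so
# the same call works with or without torch installed.
def _example_seed_cpu(seed: int = 42):
    if is_torch_available():
        backend_manual_seed("cpu", seed)  # dispatches to torch.manual_seed
    return backend_device_count("cpu")  # 0 for cpu per the table above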
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
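# Sketch of a minimal device spec file usable via `TRANSFORMERS_TEST_DEVICE_SPEC`.
# The module and backend names below are hypothetical; `DEVICE_NAME` is required,
# while the *_FN entries are optional overrides for the BACKEND_* tables above.
#
#     # my_device_spec.py
#     import my_backend  # hypothetical third-party backend
#     DEVICE_NAME = "my_device"
#     MANUAL_SEED_FN = my_backend.manual_seed
#     EMPTY_CACHE_FN = my_backend.empty_cache
#     DEVICE_COUNT_FN = my_backend.device_count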
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
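# Usage sketch (hypothetical flag): a new opt-in switch would be wired exactly like
# the real flags defined further below, e.g.
#   _run_my_experimental_tests = parse_flag_from_env("RUN_MY_EXPERIMENTAL_TESTS", default=False)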
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be an int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env("RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
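# Minimal sketch (hypothetical test): decorators from this module compose, e.g. a
# gated-model test that is also gated behind RUN_SLOW.
@require_read_token
@slow
def _example_gated_model_test():
    pass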
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
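# Illustrative usage sketch (hypothetical test case): the decorator above makes the test run
# only on machines with more than one CUDA GPU; everywhere else it is reported as skipped.
class _ExampleMultiGpuUsage(unittest.TestCase):
    @require_torch_multi_gpu
    def test_runs_only_with_multiple_gpus(self):
        self.assertTrue(True)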
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(test_case)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
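# Minimal sketch (illustrative): apply_print_resets drops everything up to the last carriage
# return, which is exactly what a progress bar leaves behind under `pytest -s`.
def _example_apply_print_resets():
    assert apply_print_resets("step 1\rstep 2\rfinal message") == "final message"
    assert_screenout("step 1\rdone", "done")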
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debugging when you want to monitor a specific directory and make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError("/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows this::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
    run_slow = os.getenv("RUN_SLOW", False)
    use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
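# Minimal usage sketch (illustrative): keyword arguments are added to os.environ and positional
# names are removed for the duration of the block; the original environment is restored on exit.
def _example_mockenv_context():
    with mockenv_context("HF_HOME", TRANSFORMERS_OFFLINE="1"):
        assert os.environ.get("TRANSFORMERS_OFFLINE") == "1"
        assert "HF_HOME" not in os.environ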
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and, while unlikely, it could break should pytest make internal
changes; it also calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
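# Illustrative sketch of the conftest.py wiring the two helpers above expect (the hook bodies
# here are an assumption spelled out from the docstrings, not copied from an actual conftest.py):
def _example_conftest_pytest_addoption(parser):
    pytest_addoption_shared(parser)
def _example_conftest_pytest_terminal_summary(terminalreporter):
    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)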
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
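# Minimal sketch (illustrative): under pytest-xdist each worker gets its own master port, so
# concurrent distributed launches in the same test session do not collide.
def _example_unique_master_port():
    port = get_torch_dist_unique_port()
    assert port >= 29500
    # e.g. a hypothetical launch string: f"torchrun --master_port={port} train_script.py"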
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
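# Minimal sketch (illustrative): scalars are duplicated, iterables are passed through unchanged.
def _example_to_2tuple():
    assert to_2tuple(7) == (7, 7)
    assert to_2tuple((3, 5)) == (3, 5)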
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
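# Minimal sketch (illustrative): on success the decoded stdout can be returned; on failure a
# SubprocessCallException carrying the captured output is raised instead.
def _example_run_command():
    out = run_command([sys.executable, "-c", "print('ok')"], return_stdout=True)
    assert out.strip() == "ok"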
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
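# Illustrative usage sketch (hypothetical test case): retry up to 3 times, sleeping half a
# second between attempts.
class _ExampleFlakyUsage(unittest.TestCase):
    @is_flaky(max_attempts=3, wait_before_retry=0.5, description="hypothetical flaky example")
    def test_occasionally_flaky(self):
        self.assertTrue(True)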
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_methohd = "spawn"
ctx = multiprocessing.get_context(start_methohd)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as the printed output would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA usage is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
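# Minimal sketch (illustrative): with skip_cuda_tests=True, a code block whose example moves a
# tensor to device 0 is dropped entirely, so no doctest is run for it.
def _example_preprocess_string():
    block = "```python\n>>> x = get_tensor()\n>>> x.to(0)\n```"
    assert preprocess_string(block, skip_cuda_tests=True) == ""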
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. Need to guard against `None`
# instead at user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
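# Minimal sketch (illustrative): "cpu" has explicit entries when torch is installed, and unknown
# devices fall back to the "default" entry, so these helpers are safe in CPU-only environments.
def _example_backend_helpers():
    device_count = backend_device_count("cpu")
    assert isinstance(device_count, int)
    if is_torch_available():
        backend_manual_seed("cpu", 0)  # dispatches to torch.manual_seed
        backend_empty_cache("cpu")  # mapped to None for cpu, i.e. a no-op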
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError("Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
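# Hedged sketch (file name, device name, and no-op callables are assumptions, not taken
# from this row) of a minimal device spec module satisfying the contract enforced above:
# it must define `DEVICE_NAME`, and any `*_FN` attribute found by `update_mapping_from_spec`
# is copied into the matching BACKEND_* dispatch table. It would live in its own file,
# selected via e.g. `TRANSFORMERS_TEST_DEVICE_SPEC=my_device_spec.py`.
DEVICE_NAME = "cpu"  # must agree with TRANSFORMERS_TEST_DEVICE when that variable is also set
MANUAL_SEED_FN = lambda seed: None  # becomes BACKEND_MANUAL_SEED[DEVICE_NAME]
EMPTY_CACHE_FN = lambda: None  # becomes BACKEND_EMPTY_CACHE[DEVICE_NAME]
DEVICE_COUNT_FN = lambda: 1  # becomes BACKEND_DEVICE_COUNT[DEVICE_NAME]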
| 2024-09-07 07:32:42.807950: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-09-07 07:32:42.822030: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
2024-09-07 07:32:42.837662: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
2024-09-07 07:32:42.842480: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-09-07 07:32:42.855390: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-09-07 07:32:43.926479: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
ImportError while loading conftest '/workspace/test_repo/conftest.py'.
conftest.py:26: in <module>
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
src/transformers/testing_utils.py:54: in <module>
from .utils import (
E ImportError: cannot import name 'GGUF_MIN_VERSION' from 'transformers.utils' (/workspace/test_repo/src/transformers/utils/__init__.py)
| {'total': 17, 'passed': 6, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 3, 'warning': 8, 'error': 0} | {'total': 0, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0} | ./test_repo/src/transformers/testing_utils.py | ./test_repo/examples/tensorflow/test_tensorflow_examples.py |
2_transformers_callee__testing_utils__require_intel_extension_for_pytorch__34097b3304d79ace845316d4929220623279c8bc | callee | fail-to-pass | https://github.com/huggingface/transformers | 34097b3304d79ace845316d4929220623279c8bc | function | require_intel_extension_for_pytorch | testing_utils.py | def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed.
"""
return unittest.skipUnless(is_ipex_available(), "test requires Intel Extension for PyTorch")(test_case)
| def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
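# Hedged usage sketch (class and test names are assumptions): since the decorator is a
# thin wrapper around unittest.skipUnless, it can mark a single test or a whole TestCase.
@require_intel_extension_for_pytorch
class IpexSmokeTest(unittest.TestCase):
    def test_ipex_importable(self):
        import intel_extension_for_pytorch  # noqa: F401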
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import inspect
import logging
import os
import re
import shlex
import shutil
import sys
import tempfile
import unittest
from collections.abc import Mapping
from distutils.util import strtobool
from io import StringIO
from pathlib import Path
from typing import Iterator, Union
from unittest import mock
from transformers import logging as transformers_logging
from .deepspeed import is_deepspeed_available
from .integrations import (
is_fairscale_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
)
from .utils import (
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_ipex_available,
is_librosa_available,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_scatter_available,
is_scipy_available,
is_sentencepiece_available,
is_soundfile_availble,
is_spacy_available,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdynamo_available,
is_vision_available,
)
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
PASS = "__DUMMY_TRANSFORMERS_PASS__"
ENDPOINT_STAGING = "https://moon-staging.huggingface.co"
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=False)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False)
_run_git_lfs_tests = parse_flag_from_env("RUN_GIT_LFS_TESTS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip("test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test.
Pipeline tests are skipped by default and we can run only them by setting RUN_PIPELINE_TESTS environment variable
to a truthy value and selecting the is_pipeline_test pytest mark.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip("test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
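# Hedged usage sketch (the test name is an assumption): this test is collected but
# skipped unless the RUN_SLOW environment variable is set to a truthy value.
@slow
def test_full_pretrained_generation():
    ...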
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_git_lfs(test_case):
"""
Decorator marking a test that requires git-lfs.
git-lfs requires additional dependencies, and tests are skipped by default. Set the RUN_GIT_LFS_TESTS environment
variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_git_lfs_tests, "test of git lfs workflow")(test_case)
def require_accelerate(test_case):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(is_accelerate_available(), "test requires accelerate")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_torch_scatter(test_case):
"""
Decorator marking a test that requires PyTorch scatter.
These tests are skipped when PyTorch scatter isn't installed.
"""
return unittest.skipUnless(is_scatter_available(), "test requires PyTorch scatter")(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_scatter(test_case):
"""
Decorator marking a test that requires PyTorch Scatter. These tests are skipped when PyTorch Scatter isn't
installed.
"""
return unittest.skipUnless(is_scatter_available(), "test requires PyTorch Scatter")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_tpu_available(), "test requires PyTorch TPU")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0 or using CPU."""
return unittest.skipUnless(
is_torch_bf16_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0 or using CPU",
)(test_case)
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_fairscale(test_case):
"""
Decorator marking a test that requires fairscale
"""
return unittest.skipUnless(is_fairscale_available(), "test requires fairscale")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator for bits and bytes (bnb) dependency
"""
return unittest.skipUnless(is_bitsandbytes_available(), "test requires bnb")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
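# Hedged usage sketch (the helper name and sub-path are assumptions): called from a
# module under `tests/`, this resolves fixture paths independently of the working
# directory pytest was launched from.
def _example_fixture_dir():
    return get_tests_dir("fixtures")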
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this is a not wanted behavior and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporarily add the given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
def mockenv(**kwargs):
"""
This is a convenience wrapper that allows this::
    @mockenv(RUN_SLOW=True, USE_TF=False)
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
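# Hedged usage sketch (the variable names are assumptions): remove one variable and set
# another for the duration of the block; both are restored when the block exits.
with mockenv_context("SOME_VAR_TO_REMOVE", SOME_VAR_TO_SET="1"):
    assert os.environ.get("SOME_VAR_TO_REMOVE") is None
    assert os.environ["SOME_VAR_TO_SET"] == "1"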
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this functions taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
with open(report_files["passes"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
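# Hedged usage sketch (the child command is an assumption): run a small Python child
# process; a non-zero return code raises, and captured stdout lines are returned as a list.
result = execute_subprocess_async([sys.executable, "-c", "print('hello from child')"])
assert "hello from child" in result.stdout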
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
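# Hedged illustration (the worker id is an assumption): under pytest-xdist, worker "gw3"
# is assigned port 29500 + 3, so concurrent workers never collide on the master port.
with mock.patch.dict(os.environ, {"PYTEST_XDIST_WORKER": "gw3"}):
    assert get_torch_dist_unique_port() == 29503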
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers and downcasting tensors/numpy arrays, to get a simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key on one line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(lines[1]) - len(lines[1].lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
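# Hedged example (payload and temporary file are assumptions): a file produced by
# `json.dump(..., indent=2)` satisfies the "one key per line, two-space indent" format
# asserted above.
import json
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as _f:
    json.dump({"hidden_size": 32, "num_layers": 2}, _f, indent=2)
check_json_file_has_correct_format(_f.name)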
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import inspect
import logging
import os
import re
import shlex
import shutil
import sys
import tempfile
import unittest
from collections.abc import Mapping
from distutils.util import strtobool
from io import StringIO
from pathlib import Path
from typing import Iterator, Union
from unittest import mock
from transformers import logging as transformers_logging
from .deepspeed import is_deepspeed_available
from .integrations import (
is_fairscale_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
)
from .utils import (
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_ipex_available,
is_librosa_available,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_scatter_available,
is_scipy_available,
is_sentencepiece_available,
is_soundfile_availble,
is_spacy_available,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdynamo_available,
is_vision_available,
)
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
PASS = "__DUMMY_TRANSFORMERS_PASS__"
ENDPOINT_STAGING = "https://moon-staging.huggingface.co"
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=False)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=False)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False)
_run_git_lfs_tests = parse_flag_from_env("RUN_GIT_LFS_TESTS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip("test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip("test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test.
Pipeline tests are skipped by default and we can run only them by setting RUN_PIPELINE_TESTS environment variable
to a truthy value and selecting the is_pipeline_test pytest mark.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip("test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip("test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_git_lfs(test_case):
"""
Decorator marking a test that requires git-lfs.
git-lfs requires additional dependencies, and tests are skipped by default. Set the RUN_GIT_LFS_TESTS environment
variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_git_lfs_tests, "test of git lfs workflow")(test_case)
def require_accelerate(test_case):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(is_accelerate_available(), "test requires accelerate")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed.
"""
return unittest.skipUnless(is_ipex_available(), "test requires Intel Extension for PyTorch")(test_case)
def require_torch_scatter(test_case):
"""
Decorator marking a test that requires PyTorch scatter.
These tests are skipped when PyTorch scatter isn't installed.
"""
return unittest.skipUnless(is_scatter_available(), "test requires PyTorch scatter")(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_scipy(test_case):
"""
    Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_scatter(test_case):
"""
Decorator marking a test that requires PyTorch Scatter. These tests are skipped when PyTorch Scatter isn't
installed.
"""
return unittest.skipUnless(is_scatter_available(), "test requires PyTorch Scatter")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_tpu(test_case):
"""
Decorator marking a test that requires a TPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_tpu_available(), "test requires PyTorch TPU")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0 or using CPU."""
return unittest.skipUnless(
is_torch_bf16_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0 or using CPU",
)(test_case)
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_fairscale(test_case):
"""
Decorator marking a test that requires fairscale
"""
return unittest.skipUnless(is_fairscale_available(), "test requires fairscale")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator for bits and bytes (bnb) dependency
"""
return unittest.skipUnless(is_bitsandbytes_available(), "test requires bnb")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
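# Example (illustrative, not part of this module): a framework-agnostic skip built
# on the helper above; the test name is hypothetical.
#
#     @unittest.skipUnless(get_gpu_count() > 1, "test requires at least 2 GPUs")
#     def test_data_parallel(self):
#         ...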
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
        The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
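# Example (illustrative): resolving a fixture path relative to the tests dir; the
# "fixtures/tests_samples" sub-path is hypothetical.
#
#     SAMPLES_DIR = get_tests_dir("fixtures/tests_samples")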
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
    assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
    doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
        logger: `logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
    Temporarily add a given path to `sys.path`.
    Usage:
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
                If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
                `tmp_dir` already exists, any existing files will remain there.
            after (`bool`, *optional*):
                If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
                intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
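    # Example (illustrative) of the two modes documented above:
    #
    #     tmp_dir = self.get_auto_remove_tmp_dir()         # unique dir, removed after the test
    #     dbg_dir = self.get_auto_remove_tmp_dir("./xxx")  # fixed dir, emptied before, kept after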
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
def mockenv(**kwargs):
"""
    this is a convenience wrapper, that allows this ::
    @mockenv(RUN_SLOW=True, USE_TF=False)
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
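# Example (illustrative): remove one variable and set another for the duration of a
# block; the variable names are only for demonstration.
#
#     with mockenv_context("HF_TOKEN", TRANSFORMERS_OFFLINE="1"):
#         ...  # code here sees the patched environment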
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
    This function emulates the --durations and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
    NB: this function taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
with open(report_files["passes"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
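# Sketch (assumed wiring, for illustration) of the `conftest.py` wrappers the two
# functions above expect to be called from; the exact conftest contents may differ
# per test suite.
#
#     def pytest_addoption(parser):
#         pytest_addoption_shared(parser)
#
#     def pytest_terminal_summary(terminalreporter):
#         make_reports = terminalreporter.config.getoption("--make-reports")
#         if make_reports:
#             pytest_terminal_summary_main(terminalreporter, id=make_reports)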
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
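# Example (illustrative): under `pytest -n 4`, workers gw0..gw3 get ports
# 29500..29503, so concurrent distributed tests don't collide; the launch command
# below is a hypothetical usage.
#
#     cmd = f"python -m torch.distributed.launch --master_port={get_torch_dist_unique_port()} train.py"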
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
            # each key one line, indent should be 2, min length is 3
            assert lines[0].strip() == "{"
            for line in lines[1:-1]:
                left_indent = len(line) - len(line.lstrip())
                assert left_indent == 2
assert lines[-1].strip() == "}"
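# The "correct format" asserted above corresponds to `json.dump(..., indent=2)`
# output, e.g. (illustrative):
#
#     {
#       "model_type": "bert",
#       "vocab_size": 30522
#     }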
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
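# Example (illustrative): with `RUN_SLOW=yes` exported, the call below returns a
# truthy value; with the variable unset, it falls back to the given default.
#
#     run_slow = parse_flag_from_env("RUN_SLOW", default=False)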
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env(
"RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env(
"RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env(
"RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env(
"RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
    Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
    Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(
min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
    Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
    Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
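# Example (illustrative): tests that download gated checkpoints are typically
# wrapped like this; the test name and model id are hypothetical.
#
#     @require_read_token
#     @slow
#     def test_gated_model(self):
#         model = AutoModel.from_pretrained("org/gated-model")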
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
    These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
    Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
    Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
    Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(test_case)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support")(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support")(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
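# Hedged usage sketch (hypothetical sub-path): resolve files shipped with the test suite
# relative to the `tests` directory, no matter where pytest was invoked from.
def _example_get_tests_dir_usage():
    return get_tests_dir("fixtures")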
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
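# Minimal sketch (illustrative only) of the \r handling described above: everything up to the
# final carriage return is dropped, leaving just the final message.
def _example_apply_print_resets():
    assert apply_print_resets("downloading 10%\rdownloading 90%\rdone") == "done"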
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporarily add the given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(
f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError(
"/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
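# Hedged usage sketch (hypothetical subclass and test name): combining the TestCasePlus path
# accessors with an auto-removed temporary directory.
class _ExampleTestCasePlusUsage(TestCasePlus):
    def test_tmp_dir_cleanup(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        (Path(tmp_dir) / "dummy.txt").write_text("hello")
        self.assertTrue(str(self.tests_dir).endswith("tests"))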
def mockenv(**kwargs):
"""
this is a convenience wrapper that allows writing:
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
    run_slow = os.getenv("RUN_SLOW", False)
    use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
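# Hedged usage sketch (string values on purpose, since os.environ only accepts str): the
# decorator patches the environment only while the wrapped test runs.
@mockenv(RUN_SLOW="yes", USE_TF="no")
def _example_mockenv_usage():
    return os.getenv("RUN_SLOW"), os.getenv("USE_TF")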
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
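# Hedged usage sketch (hypothetical variable names): add/override one variable and remove
# another for the duration of the block; both are restored on exit.
def _example_mockenv_context_usage():
    with mockenv_context("SOME_VAR_TO_REMOVE", SOME_VAR_TO_SET="1"):
        assert os.environ.get("SOME_VAR_TO_SET") == "1"
        assert "SOME_VAR_TO_REMOVE" not in os.environ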
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and, while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(
f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(
r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
# emulate -rA (used in summary_passes() and short_test_summary())
tr.reportchars = "wPpsxXEf"
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(
l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(
l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin,
timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
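# Hedged sketch (hypothetical script name): building launcher arguments so that concurrent
# pytest-xdist workers each talk to a different master port.
def _example_distributed_launch_args(script="run_dummy.py"):
    return ["-m", "torch.distributed.run", f"--master_port={get_torch_dist_unique_port()}", script]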
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist(), decimals)
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist(), decimals)
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
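# Minimal sketch (pure-Python inputs, so it is safe without torch/tf): floats are rounded to
# `decimals`, containers are simplified recursively.
def _example_nested_simplify():
    assert nested_simplify([1.23456, {"a": (0.5, 2)}]) == [1.235, {"a": (0.5, 2)}]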
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
# each key on its own line, indent should be 2, min length is 3
assert lines[0].strip() == "{"
for line in lines[1:-1]:
left_indent = len(line) - len(line.lstrip())
assert left_indent == 2
assert lines[-1].strip() == "}"
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
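# Minimal sketch: scalars are duplicated, iterables are passed through unchanged.
def _example_to_2tuple():
    assert to_2tuple(7) == (7, 7)
    assert to_2tuple((3, 5)) == (3, 5)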
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
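# Hedged usage sketch: run a trivial Python subprocess and capture its stdout; a non-zero exit
# code would surface as a SubprocessCallException instead.
def _example_run_command():
    out = run_command([sys.executable, "-c", "print('ok')"], return_stdout=True)
    assert out.strip() == "ok"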
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(
f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
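# Hedged usage sketch (hypothetical test): retry a timing-sensitive check up to three times,
# waiting half a second between attempts.
@is_flaky(max_attempts=3, wait_before_retry=0.5, description="hypothetical timing-dependent check")
def _example_flaky_check():
    return True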
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_method = "spawn"
ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(
input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
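# Hedged sketch (hypothetical target function): the contract run_test_in_subprocess expects -
# read the inputs from the input queue, put a dict containing an "error" key on the output
# queue, then join it so the parent can finish reading.
def _example_subprocess_target(in_queue, out_queue, timeout):
    results = {"error": None}
    try:
        _ = in_queue.get(timeout=timeout)
        results["value"] = 42
    except Exception as e:
        results["error"] = str(e)
    out_queue.put(results, timeout=timeout)
    out_queue.join()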
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
CUDA usage is detected (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(
codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(
r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
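# Hedged sketch (illustrative snippet): a code block that references cuda is dropped entirely
# when skip_cuda_tests=True, and returned unchanged otherwise.
def _example_preprocess_string():
    snippet = '```python\n>>> import torch\n>>> torch.ones(1).to("cuda")\n```'
    assert preprocess_string(snippet, skip_cuda_tests=True) == ""
    assert "torch.ones" in preprocess_string(snippet, skip_cuda_tests=False)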
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HfDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
# Some device agnostic functions return values. If the table maps this device to `None`,
# return `None` and let the caller handle it.
if fn is None:
return None
return fn(*args, **kwargs)
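# Hedged sketch (toy dispatch table, not the real BACKEND_* mappings below): the device string
# routes the call, "default" handles unknown devices, and a `None` entry short-circuits.
def _example_device_agnostic_dispatch():
    table = {"cuda": lambda: "cuda-path", "cpu": None, "default": lambda: "fallback"}
    assert _device_agnostic_dispatch("cuda", table) == "cuda-path"
    assert _device_agnostic_dispatch("cpu", table) is None
    assert _device_agnostic_dispatch("xpu", table) == "fallback"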
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed,
"cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {
"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {
"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
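# Hedged usage sketch (assumes torch is installed so the CPU entries above are populated):
# seed, clear caches, and count devices without branching on the device name in the test body.
def _example_backend_helpers(device="cpu"):
    backend_manual_seed(device, 42)
    backend_empty_cache(device)
    return backend_device_count(device)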
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(
f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError(
"Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
| # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch
import urllib3
from transformers import logging as transformers_logging
from .integrations import (
is_clearml_available,
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
ACCELERATE_MIN_VERSION,
GGUF_MIN_VERSION,
is_accelerate_available,
is_apex_available,
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_av_available,
is_bitsandbytes_available,
is_bs4_available,
is_cv2_available,
is_cython_available,
is_decord_available,
is_detectron2_available,
is_eetq_available,
is_essentia_available,
is_faiss_available,
is_fbgemm_gpu_available,
is_flash_attn_2_available,
is_flax_available,
is_fsdp_available,
is_ftfy_available,
is_g2p_en_available,
is_galore_torch_available,
is_gguf_available,
is_grokadamw_available,
is_ipex_available,
is_jieba_available,
is_jinja_available,
is_jumanpp_available,
is_keras_nlp_available,
is_levenshtein_available,
is_librosa_available,
is_liger_kernel_available,
is_lomo_available,
is_natten_available,
is_nltk_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_pretty_midi_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_quanto_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_soundfile_availble,
is_spacy_available,
is_sudachi_available,
is_sudachi_projection_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_tiktoken_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available_on_device,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_deterministic,
is_torch_fp16_available_on_device,
is_torch_neuroncore_available,
is_torch_npu_available,
is_torch_sdpa_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_xla_available,
is_torch_xpu_available,
is_torchao_available,
is_torchaudio_available,
is_torchdynamo_available,
is_torchvision_available,
is_vision_available,
strtobool,
)
if is_accelerate_available():
from accelerate.state import AcceleratorState, PartialState
if is_pytest_available():
from _pytest.doctest import (
Module,
_get_checker,
_get_continue_on_failure,
_get_runner,
_is_mocked,
_patch_unwrap_mock_aware,
get_optionflags,
)
from _pytest.outcomes import skip
from _pytest.pathlib import import_path
from pytest import DoctestItem
else:
Module = object
DoctestItem = object
SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.
# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"
# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
if is_torch_available():
import torch
IS_ROCM_SYSTEM = torch.version.hip is not None
IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
IS_ROCM_SYSTEM = False
IS_CUDA_SYSTEM = False
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
def parse_int_from_env(key, default=None):
try:
value = os.environ[key]
except KeyError:
_value = default
else:
try:
_value = int(value)
except ValueError:
raise ValueError(f"If set, {key} must be a int.")
return _value
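# Illustrative sketch (the environment variable names below are hypothetical): how the two parsers
# above are typically combined to read test configuration, falling back to defaults when unset.
def _example_read_test_env():
    run_extra_checks = parse_flag_from_env("RUN_EXTRA_CHECKS", default=False)
    num_shards = parse_int_from_env("NUM_TEST_SHARDS", default=1)
    return run_extra_checks, num_shards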
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env("RUN_THIRD_PARTY_DEVICE_TESTS", default=False)
def is_pt_tf_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and TensorFlow.
PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
"""
if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
return unittest.skip(reason="test is PT+TF test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_tf_cross_test()(test_case)
def is_pt_flax_cross_test(test_case):
"""
Decorator marking a test as a test that controls interactions between PyTorch and Flax.
PT+FLAX tests are skipped by default and we can run only them by setting RUN_PT_FLAX_CROSS_TESTS environment
variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
"""
if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
return unittest.skip(reason="test is PT+FLAX test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
Those tests will run using the staging environment of huggingface.co instead of the real model hub.
"""
if not _run_staging:
return unittest.skip(reason="test is staging test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_staging_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
skipped.
"""
if not _run_pipeline_tests:
return unittest.skip(reason="test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_agent_test(test_case):
"""
Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
"""
if not _run_agent_tests:
return unittest.skip(reason="test is an agent test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_agent_test()(test_case)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
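# Minimal usage sketch (illustrative only, not a real test): `slow` is stacked on a test method,
# which then only runs when RUN_SLOW is set to a truthy value in the environment.
class _ExampleSlowUsage(unittest.TestCase):
    @slow
    def test_expensive_path(self):
        self.assertTrue(True)  # a real test would e.g. load a large checkpoint here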
def tooslow(test_case):
"""
Decorator marking a test as too slow.
Slow tests are skipped while they're in the process of being fixed. No test should stay tagged as "tooslow" as
these will not be tested by the CI.
"""
return unittest.skip(reason="test is too slow")(test_case)
def custom_tokenizers(test_case):
"""
Decorator marking a test for a custom tokenizer.
Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)
def require_bs4(test_case):
"""
Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
"""
return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)
def require_galore_torch(test_case):
"""
Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
https://github.com/jiaweizzhao/GaLore
"""
return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)
def require_lomo(test_case):
"""
Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
https://github.com/OpenLMLab/LOMO
"""
return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)
def require_grokadamw(test_case):
"""
Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
"""
return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)
def require_cv2(test_case):
"""
Decorator marking a test that requires OpenCV.
These tests are skipped when OpenCV isn't installed.
"""
return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)
def require_levenshtein(test_case):
"""
Decorator marking a test that requires Levenshtein.
These tests are skipped when Levenshtein isn't installed.
"""
return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)
def require_nltk(test_case):
"""
Decorator marking a test that requires NLTK.
These tests are skipped when NLTK isn't installed.
"""
return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)
def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
"""
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
"""
return unittest.skipUnless(
is_accelerate_available(min_version), f"test requires accelerate version >= {min_version}"
)(test_case)
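# Illustrative sketch: because `require_accelerate` accepts a `min_version` keyword, a pinned
# variant can be prepared with functools.partial and then used as a plain decorator (the name and
# version below are examples only).
require_accelerate_example_pin = functools.partial(require_accelerate, min_version="0.28.0")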
def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
"""
Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
"""
return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
test_case
)
def require_fsdp(test_case, min_version: str = "1.12.0"):
"""
Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
"""
return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
test_case
)
def require_g2p_en(test_case):
"""
Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
"""
return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)
def require_safetensors(test_case):
"""
Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
"""
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_rjieba(test_case):
"""
Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
"""
return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)
def require_jieba(test_case):
"""
Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
"""
return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)
def require_jinja(test_case):
"""
Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
"""
return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)
def require_tf2onnx(test_case):
return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)
def require_onnx(test_case):
return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires Timm.
These tests are skipped when Timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)
def require_natten(test_case):
"""
Decorator marking a test that requires NATTEN.
These tests are skipped when NATTEN isn't installed.
"""
return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_flash_attn(test_case):
"""
Decorator marking a test that requires Flash Attention.
These tests are skipped when Flash Attention isn't installed.
"""
return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)
def require_torch_sdpa(test_case):
"""
Decorator marking a test that requires PyTorch's SDPA.
These tests are skipped when requirements are not met (torch version).
"""
return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)
def require_read_token(fn):
"""
A decorator that loads the HF token for tests that require to load gated models.
"""
token = os.getenv("HF_HUB_READ_TOKEN")
@wraps(fn)
def _inner(*args, **kwargs):
if token is not None:
with patch("huggingface_hub.utils._headers.get_token", return_value=token):
return fn(*args, **kwargs)
else: # Allow running locally with the default token env variable
return fn(*args, **kwargs)
return _inner
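# Usage sketch (illustrative only): any callable can be wrapped; when HF_HUB_READ_TOKEN is set,
# the hub token lookup is patched for the duration of the call so gated repos can be accessed.
@require_read_token
def _example_gated_access():
    # a real test would call e.g. `AutoTokenizer.from_pretrained(<gated repo id>)` here
    return True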
def require_peft(test_case):
"""
Decorator marking a test that requires PEFT.
These tests are skipped when PEFT isn't installed.
"""
return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires Torchvision.
These tests are skipped when Torchvision isn't installed.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
version.
"""
return unittest.skipUnless(
is_ipex_available(),
"test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
" https://github.com/intel/intel-extension-for-pytorch",
)(test_case)
def require_tensorflow_probability(test_case):
"""
Decorator marking a test that requires TensorFlow probability.
These tests are skipped when TensorFlow probability isn't installed.
"""
return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
test_case
)
def require_torchaudio(test_case):
"""
Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
"""
return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
"""
return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_sentencepiece(test_case):
"""
Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
"""
return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)
def require_sacremoses(test_case):
"""
Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
"""
return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)
def require_seqio(test_case):
"""
Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
"""
return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)
def require_scipy(test_case):
"""
Decorator marking a test that requires Scipy. These tests are skipped when Scipy isn't installed.
"""
return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)
def require_tokenizers(test_case):
"""
Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
"""
return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
def require_tensorflow_text(test_case):
"""
Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
installed.
"""
return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)
def require_keras_nlp(test_case):
"""
Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
"""
return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pytesseract(test_case):
"""
Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
"""
return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)
def require_pytorch_quantization(test_case):
"""
Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
Quantization Toolkit isn't installed.
"""
return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
test_case
)
def require_vision(test_case):
"""
Decorator marking a test that requires the vision dependencies. These tests are skipped when they aren't
installed.
"""
return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)
def require_ftfy(test_case):
"""
Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
"""
return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)
def require_spacy(test_case):
"""
Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
"""
return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)
def require_decord(test_case):
"""
Decorator marking a test that requires decord. These tests are skipped when decord isn't installed.
"""
return unittest.skipUnless(is_decord_available(), "test requires decord")(test_case)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs.
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_multi_accelerator(test_case):
"""
Decorator marking a test that requires a multi-accelerator (in PyTorch). These tests are skipped on a machine
without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
test_case
)
def require_torch_non_multi_gpu(test_case):
"""
Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
def require_torch_non_multi_accelerator(test_case):
"""
Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)
def require_torch_up_to_2_gpus(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)
def require_torch_up_to_2_accelerators(test_case):
"""
Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
"""
if not is_torch_available():
return unittest.skip(reason="test requires PyTorch")(test_case)
return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
test_case
)
def require_torch_xla(test_case):
"""
Decorator marking a test that requires TorchXLA (in PyTorch).
"""
return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)
def require_torch_neuroncore(test_case):
"""
Decorator marking a test that requires NeuronCore (in PyTorch).
"""
return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
test_case
)
def require_torch_npu(test_case):
"""
Decorator marking a test that requires NPU (in PyTorch).
"""
return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)
def require_torch_multi_npu(test_case):
"""
Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
multiple NPUs.
To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
"""
if not is_torch_npu_available():
return unittest.skip(reason="test requires PyTorch NPU")(test_case)
return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)
def require_torch_xpu(test_case):
"""
Decorator marking a test that requires XPU (in PyTorch).
These tests are skipped when XPU backend is not available. XPU backend might be available either via stock
PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version
must match the current PyTorch version.
"""
return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
multiple XPUs.
To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
"""
if not is_torch_xpu_available():
return unittest.skip(reason="test requires PyTorch XPU")(test_case)
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
if is_torch_available():
# Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
import torch
if "TRANSFORMERS_TEST_BACKEND" in os.environ:
backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
f" traceback):\n{e}"
) from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
if torch_device == "cuda" and not torch.cuda.is_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
)
if torch_device == "xpu" and not is_torch_xpu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
)
if torch_device == "npu" and not is_torch_npu_available():
raise ValueError(
f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
)
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
) from e
elif torch.cuda.is_available():
torch_device = "cuda"
elif _run_third_party_device_tests and is_torch_npu_available():
torch_device = "npu"
elif _run_third_party_device_tests and is_torch_xpu_available():
torch_device = "xpu"
else:
torch_device = "cpu"
else:
torch_device = None
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
jax_device = jax.default_backend()
else:
jax_device = None
def require_torchdynamo(test_case):
"""Decorator marking a test that requires TorchDynamo"""
return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)
def require_torchao(test_case):
"""Decorator marking a test that requires torchao"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
def require_torch_tensorrt_fx(test_case):
"""Decorator marking a test that requires Torch-TensorRT FX"""
return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accessible accelerator and PyTorch."""
return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
test_case
)
def require_torch_fp16(test_case):
"""Decorator marking a test that requires a device that supports fp16"""
return unittest.skipUnless(
is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
)(test_case)
def require_torch_bf16(test_case):
"""Decorator marking a test that requires a device that supports bf16"""
return unittest.skipUnless(
is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
)(test_case)
def require_torch_bf16_gpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
return unittest.skipUnless(
is_torch_bf16_gpu_available(),
"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
)(test_case)
def require_torch_bf16_cpu(test_case):
"""Decorator marking a test that requires torch>=1.10, using CPU."""
return unittest.skipUnless(
is_torch_bf16_cpu_available(),
"test requires torch>=1.10, using CPU",
)(test_case)
def require_deterministic_for_xpu(test_case):
if is_torch_xpu_available():
return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
test_case
)
else:
return test_case
def require_torch_tf32(test_case):
"""Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
return unittest.skipUnless(
is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
)(test_case)
def require_detectron2(test_case):
"""Decorator marking a test that requires detectron2."""
return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)
def require_optuna(test_case):
"""
Decorator marking a test that requires optuna.
These tests are skipped when optuna isn't installed.
"""
return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)
def require_ray(test_case):
"""
Decorator marking a test that requires Ray/tune.
These tests are skipped when Ray/tune isn't installed.
"""
return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)
def require_sigopt(test_case):
"""
Decorator marking a test that requires SigOpt.
These tests are skipped when SigOpt isn't installed.
"""
return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb.
These tests are skipped when wandb isn't installed.
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml.
These tests are skipped when clearml isn't installed.
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_soundfile(test_case):
"""
Decorator marking a test that requires soundfile
These tests are skipped when soundfile isn't installed.
"""
return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires deepspeed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)
def require_apex(test_case):
"""
Decorator marking a test that requires apex
"""
return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_av(test_case):
"""
Decorator marking a test that requires av
"""
return unittest.skipUnless(is_av_available(), "test requires av")(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard dependency torch is not installed.
"""
if is_bitsandbytes_available() and is_torch_available():
try:
import pytest
return pytest.mark.bitsandbytes(test_case)
except ImportError:
return test_case
else:
return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)
def require_optimum(test_case):
"""
Decorator for optimum dependency
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_tensorboard(test_case):
"""
Decorator for `tensorboard` dependency
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")
def require_auto_gptq(test_case):
"""
Decorator for auto_gptq dependency
"""
return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)
def require_auto_awq(test_case):
"""
Decorator for auto_awq dependency
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)
def require_quanto(test_case):
"""
Decorator for quanto dependency
"""
return unittest.skipUnless(is_quanto_available(), "test requires quanto")(test_case)
def require_fbgemm_gpu(test_case):
"""
Decorator for fbgemm_gpu dependency
"""
return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)
def require_phonemizer(test_case):
"""
Decorator marking a test that requires phonemizer
"""
return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)
def require_pyctcdecode(test_case):
"""
Decorator marking a test that requires pyctcdecode
"""
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
def require_librosa(test_case):
"""
Decorator marking a test that requires librosa
"""
return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)
def require_liger_kernel(test_case):
"""
Decorator marking a test that requires liger_kernel
"""
return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)
def require_essentia(test_case):
"""
Decorator marking a test that requires essentia
"""
return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)
def require_pretty_midi(test_case):
"""
Decorator marking a test that requires pretty_midi
"""
return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)
def cmd_exists(cmd):
return shutil.which(cmd) is not None
def require_usr_bin_time(test_case):
"""
Decorator marking a test that requires `/usr/bin/time`
"""
return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)
def require_sudachi(test_case):
"""
Decorator marking a test that requires sudachi
"""
return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)
def require_sudachi_projection(test_case):
"""
Decorator marking a test that requires sudachi_projection
"""
return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
test_case
)
def require_jumanpp(test_case):
"""
Decorator marking a test that requires jumanpp
"""
return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)
def require_cython(test_case):
"""
Decorator marking a test that requires cython
"""
return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)
def require_tiktoken(test_case):
"""
Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
"""
return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)
def get_gpu_count():
"""
Return the number of available gpus (regardless of whether torch, tf or jax is used)
"""
if is_torch_available():
import torch
return torch.cuda.device_count()
elif is_tf_available():
import tensorflow as tf
return len(tf.config.list_physical_devices("GPU"))
elif is_flax_available():
import jax
return jax.device_count()
else:
return 0
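# Illustrative sketch: a framework-agnostic skip based on the visible device count, following the
# same pattern as the requirement decorators above.
def _example_require_two_devices(test_case):
    return unittest.skipUnless(get_gpu_count() >= 2, "test requires at least 2 devices")(test_case)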
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
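# Illustrative sketch: resolving a data directory that ships with the test suite, independent of
# the current working directory. The "fixtures" sub-path is an assumed example, and the call is
# meant to be made from inside a test module (paths resolve relative to the caller's file).
def _example_fixture_dir():
    return get_tests_dir("fixtures")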
#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r"^.*\r", "", buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
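# Illustrative sketch: a buffer written with carriage-return style progress updates collapses to
# its final message, so assertions see the same text with and without `pytest -s`.
def _example_progress_output():
    assert apply_print_resets("loading 10%\rloading 50%\rloading done") == "loading done"
    assert_screenout("loading 10%\rloading done", "done")
    return True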
class CaptureStd:
"""
Context manager to capture:
- stdout: replay it, clean it up and make it available via `obj.out`
- stderr: replay it and make it available via `obj.err`
Args:
out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
By default each captured stream gets replayed back on context's exit, so that one can see what the test was
doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False` to
disable this feature.
Examples:
```python
# to capture stdout only with auto-replay
with CaptureStdout() as cs:
print("Secret message")
assert "message" in cs.out
# to capture stderr only with auto-replay
import sys
with CaptureStderr() as cs:
print("Warning: ", file=sys.stderr)
assert "Warning" in cs.err
# to capture both streams with auto-replay
with CaptureStd() as cs:
print("Secret message")
print("Warning: ", file=sys.stderr)
assert "message" in cs.out
assert "Warning" in cs.err
# to capture just one of the streams, and not the other, with auto-replay
with CaptureStd(err=False) as cs:
print("Secret message")
assert "message" in cs.out
# but best use the stream-specific subclasses
# to capture without auto-replay
with CaptureStd(replay=False) as cs:
print("Secret message")
assert "message" in cs.out
```"""
def __init__(self, out=True, err=True, replay=True):
self.replay = replay
if out:
self.out_buf = StringIO()
self.out = "error: CaptureStd context is unfinished yet, called too early"
else:
self.out_buf = None
self.out = "not capturing stdout"
if err:
self.err_buf = StringIO()
self.err = "error: CaptureStd context is unfinished yet, called too early"
else:
self.err_buf = None
self.err = "not capturing stderr"
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
captured = self.out_buf.getvalue()
if self.replay:
sys.stdout.write(captured)
self.out = apply_print_resets(captured)
if self.err_buf:
sys.stderr = self.err_old
captured = self.err_buf.getvalue()
if self.replay:
sys.stderr.write(captured)
self.err = captured
def __repr__(self):
msg = ""
if self.out_buf:
msg += f"stdout: {self.out}\n"
if self.err_buf:
msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
"""Same as CaptureStd but captures only stdout"""
def __init__(self, replay=True):
super().__init__(err=False, replay=replay)
class CaptureStderr(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
class CaptureLogger:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
"""
Temporarily add the given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
class TestCasePlus(unittest.TestCase):
"""
This class extends *unittest.TestCase* with additional features.
Feature 1: A set of fully resolved important file and dir path accessors.
In tests often we need to know where things are relative to the current test file, and it's not trivial since the
test could be invoked from more than one directory or could reside in sub-directories with different depths. This
class solves this problem by sorting out all the basic paths and provides easy accessors to them:
- `pathlib` objects (all fully resolved):
- `test_file_path` - the current test file path (=`__file__`)
- `test_file_dir` - the directory containing the current test file
- `tests_dir` - the directory of the `tests` test suite
- `examples_dir` - the directory of the `examples` test suite
- `repo_root_dir` - the directory of the repository
- `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)
- stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:
- `test_file_path_str`
- `test_file_dir_str`
- `tests_dir_str`
- `examples_dir_str`
- `repo_root_dir_str`
- `src_dir_str`
Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.
1. Create a unique temporary dir:
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```
`tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
test.
2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
empty it after the test.
```python
def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```
This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
didn't leave any data in there.
3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
following behavior:
`before=True`: the temporary dir will always be cleared at the beginning of the test.
`before=False`: if the temporary dir already existed, any existing files will remain there.
`after=True`: the temporary dir will always be deleted at the end of the test.
`after=False`: the temporary dir will always be left intact at the end of the test.
Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
will get nuked. i.e. please always pass paths that start with `./`
Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
otherwise.
Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite. This
is useful for invoking external programs from the test suite - e.g. distributed training.
```python
def test_whatever(self):
env = self.get_env()
```"""
def setUp(self):
# get_auto_remove_tmp_dir feature:
self.teardown_tmp_dirs = []
# figure out the resolved paths for repo_root, tests, examples, etc.
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self._test_file_dir = path.parents[0]
for up in [1, 2, 3]:
tmp_dir = path.parents[up]
if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
break
if tmp_dir:
self._repo_root_dir = tmp_dir
else:
raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
self._tests_dir = self._repo_root_dir / "tests"
self._examples_dir = self._repo_root_dir / "examples"
self._src_dir = self._repo_root_dir / "src"
@property
def test_file_path(self):
return self._test_file_path
@property
def test_file_path_str(self):
return str(self._test_file_path)
@property
def test_file_dir(self):
return self._test_file_dir
@property
def test_file_dir_str(self):
return str(self._test_file_dir)
@property
def tests_dir(self):
return self._tests_dir
@property
def tests_dir_str(self):
return str(self._tests_dir)
@property
def examples_dir(self):
return self._examples_dir
@property
def examples_dir_str(self):
return str(self._examples_dir)
@property
def repo_root_dir(self):
return self._repo_root_dir
@property
def repo_root_dir_str(self):
return str(self._repo_root_dir)
@property
def src_dir(self):
return self._src_dir
@property
def src_dir_str(self):
return str(self._src_dir)
def get_env(self):
"""
Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite it's
invoked from. This is useful for invoking external programs from the test suite - e.g. distributed training.
It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
the preset `PYTHONPATH` if any (all full resolved paths).
"""
env = os.environ.copy()
paths = [self.src_dir_str]
if "/examples" in self.test_file_dir_str:
paths.append(self.examples_dir_str)
else:
paths.append(self.tests_dir_str)
paths.append(env.get("PYTHONPATH", ""))
env["PYTHONPATH"] = ":".join(paths)
return env
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
"""
Args:
tmp_dir (`string`, *optional*):
if `None`:
- a unique temporary path will be created
- sets `before=True` if `before` is `None`
- sets `after=True` if `after` is `None`
else:
- `tmp_dir` will be created
- sets `before=True` if `before` is `None`
- sets `after=False` if `after` is `None`
before (`bool`, *optional*):
If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
`tmp_dir` already exists, any existing files will remain there.
after (`bool`, *optional*):
If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its contents
intact at the end of the test.
Returns:
tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
"""
if tmp_dir is not None:
# defining the most likely desired behavior for when a custom path is provided.
# this most likely indicates the debug mode where we want an easily locatable dir that:
# 1. gets cleared out before the test (if it already exists)
# 2. is left intact after the test
if before is None:
before = True
if after is None:
after = False
# using provided path
path = Path(tmp_dir).resolve()
# to avoid nuking parts of the filesystem, only relative paths are allowed
if not tmp_dir.startswith("./"):
raise ValueError(
f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
)
# ensure the dir is empty to start with
if before is True and path.exists():
shutil.rmtree(tmp_dir, ignore_errors=True)
path.mkdir(parents=True, exist_ok=True)
else:
# defining the most likely desired behavior for when a unique tmp path is auto generated
# (not a debug mode), here we require a unique tmp dir that:
# 1. is empty before the test (it will be empty in this situation anyway)
# 2. gets fully removed after the test
if before is None:
before = True
if after is None:
after = True
# using unique tmp dir (always empty, regardless of `before`)
tmp_dir = tempfile.mkdtemp()
if after is True:
# register for deletion
self.teardown_tmp_dirs.append(tmp_dir)
return tmp_dir
def python_one_liner_max_rss(self, one_liner_str):
"""
Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
program.
Args:
one_liner_str (`string`):
a python one liner code that gets passed to `python -c`
Returns:
max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.
Requirements:
this helper needs `/usr/bin/time` to be installed (`apt install time`)
Example:
```
one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
max_rss = self.python_one_liner_max_rss(one_liner_str)
```
"""
if not cmd_exists("/usr/bin/time"):
raise ValueError("/usr/bin/time is required, install with `apt install time`")
cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
with CaptureStd() as cs:
execute_subprocess_async(cmd, env=self.get_env())
# returned data is in KB so convert to bytes
max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
return max_rss
def tearDown(self):
# get_auto_remove_tmp_dir feature: remove registered temp dirs
for path in self.teardown_tmp_dirs:
shutil.rmtree(path, ignore_errors=True)
self.teardown_tmp_dirs = []
if is_accelerate_available():
AcceleratorState._reset_state()
PartialState._reset_state()
# delete all the env variables having `ACCELERATE` in them
for k in list(os.environ.keys()):
if "ACCELERATE" in k:
del os.environ[k]
def mockenv(**kwargs):
"""
this is a convenience wrapper, that allows this ::
@mockenv(RUN_SLOW=True, USE_TF=False)
def test_something():
    run_slow = os.getenv("RUN_SLOW", False)
    use_tf = os.getenv("USE_TF", False)
"""
return mock.patch.dict(os.environ, kwargs)
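# Usage sketch (variable name and value are examples): the decorator form patches os.environ only
# for the duration of the wrapped callable.
@mockenv(TRANSFORMERS_VERBOSITY="error")
def _example_quiet_env():
    return os.environ.get("TRANSFORMERS_VERBOSITY")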
# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
"""
Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv
The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.
Args:
remove: Environment variables to remove.
update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
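# Usage sketch (variable names are examples): the context-manager form removes one variable and
# overrides another, restoring both on exit.
def _example_mockenv_context():
    with mockenv_context("HF_HUB_OFFLINE", TRANSFORMERS_VERBOSITY="error"):
        return os.environ.get("TRANSFORMERS_VERBOSITY")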
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
This function emulates --duration and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
NB: this function taps into a private _pytest API and while unlikely, it could break should pytest do internal
changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = f"reports/{id}"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
# Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it timeouts on CircleCI if it
# takes > 10 minutes (as this part doesn't generate any output on the terminal).
# (also, it seems there is no useful information in this report, and we rarely need to read it)
# with open(report_files["passes"], "w") as f:
# tr._tw = create_terminal_writer(config, f)
# tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
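# Illustrative sketch (the script path is a placeholder): building a distributed launch command
# whose rendezvous port will not collide across concurrent pytest-xdist workers.
def _example_distributed_cmd(script="run_example.py", nproc=2):
    return shlex.split(
        f"python -m torch.distributed.run --nproc_per_node={nproc} "
        f"--master_port={get_torch_dist_unique_port()} {script}"
    )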
def nested_simplify(obj, decimals=3):
"""
Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
within tests.
"""
import numpy as np
if isinstance(obj, list):
return [nested_simplify(item, decimals) for item in obj]
if isinstance(obj, tuple):
return tuple([nested_simplify(item, decimals) for item in obj])
elif isinstance(obj, np.ndarray):
return nested_simplify(obj.tolist())
elif isinstance(obj, Mapping):
return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
elif isinstance(obj, (str, int, np.int64)):
return obj
elif obj is None:
return obj
elif is_torch_available() and isinstance(obj, torch.Tensor):
return nested_simplify(obj.tolist(), decimals)
elif is_tf_available() and tf.is_tensor(obj):
return nested_simplify(obj.numpy().tolist())
elif isinstance(obj, float):
return round(obj, decimals)
elif isinstance(obj, (np.int32, np.float32)):
return nested_simplify(obj.item(), decimals)
else:
raise Exception(f"Not supported: {type(obj)}")
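# A hedged illustration (not part of the original helpers) of what `nested_simplify`
# produces for a small nested structure; assumes numpy is installed.
def example_nested_simplify():
    import numpy as np

    raw = {"loss": np.float32(0.123456), "logits": np.array([1.00004, 2.49999])}
    simplified = nested_simplify(raw, decimals=3)
    assert simplified == {"loss": 0.123, "logits": [1.0, 2.5]}
    return simplified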
def check_json_file_has_correct_format(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
if len(lines) == 1:
# length can only be 1 if dict is empty
assert lines[0] == "{}"
else:
# otherwise make sure json has correct format (at least 3 lines)
assert len(lines) >= 3
            # each key on its own line, indent should be 2, min length is 3
            assert lines[0].strip() == "{"
            for line in lines[1:-1]:
                left_indent = len(line) - len(line.lstrip())
                assert left_indent == 2
assert lines[-1].strip() == "}"
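# A hedged sketch showing a file layout that satisfies `check_json_file_has_correct_format`:
# `json.dump(..., indent=2)` on a flat dict yields exactly the expected shape. The dict
# contents are arbitrary placeholders.
def example_write_checkable_json():
    import json
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
        json.dump({"hidden_size": 768, "num_attention_heads": 12}, f, indent=2)
        path = f.name
    check_json_file_has_correct_format(path)
    return path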
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
    if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
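# A hedged usage sketch for `run_command`: invoke the current interpreter with a trivial
# inline program and check the captured stdout. The inline program is arbitrary.
def example_run_command():
    out = run_command([sys.executable, "-c", "print('ok')"], return_stdout=True)
    assert "ok" in out
    return out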
class RequestCounter:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self.patcher = patch.object(urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
for call in self.mock.call_args_list:
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
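# A hedged sketch of decorating a flaky test with `is_flaky`; the module-level counter is
# only there to simulate a transient failure on the first attempt.
_flaky_example_attempts = {"count": 0}

@is_flaky(max_attempts=3, wait_before_retry=0.0, description="illustrative retry example")
def example_flaky_test():
    _flaky_example_attempts["count"] += 1
    if _flaky_example_attempts["count"] < 2:
        raise AssertionError("simulated transient failure")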
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
    To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
    start_method = "spawn"
    ctx = multiprocessing.get_context(start_method)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
process.start()
    # Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
    # the test from exiting properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
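# A hedged sketch (names are illustrative) of the contract `run_test_in_subprocess` expects:
# the `target_func` reads its inputs from the input queue and must put a dict with an
# "error" key on the output queue; `out_queue.join()` keeps the child alive until the
# parent has consumed the result and called `task_done()`.
import traceback
import unittest

def _example_target(in_queue, out_queue, timeout):
    error = None
    try:
        inputs = in_queue.get(timeout=timeout)
        assert inputs["x"] + inputs["y"] == 3  # the actual testing logic goes here
    except Exception:
        error = traceback.format_exc()
    out_queue.put({"error": error}, timeout=timeout)
    out_queue.join()

class ExampleSubprocessTest(unittest.TestCase):
    def test_addition_in_subprocess(self):
        run_test_in_subprocess(self, _example_target, inputs={"x": 1, "y": 2}, timeout=120)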
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made, as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
    The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
    its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
    CUDA-related code is detected (with a heuristic), this method returns an empty string so that no doctest is run for
    `string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
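# A hedged illustration of `preprocess_string` on a tiny markdown snippet: the
# `# doctest: +IGNORE_RESULT` marker gets appended to the `load_dataset` line, and a
# CUDA-touching snippet is dropped entirely when `skip_cuda_tests=True`.
def example_preprocess_string():
    snippet = (
        "```python\n"
        ">>> from datasets import load_dataset\n"
        ">>> ds = load_dataset('squad')\n"
        "```"
    )
    processed = preprocess_string(snippet, skip_cuda_tests=False)
    assert "# doctest: +IGNORE_RESULT" in processed
    assert preprocess_string(">>> model.to('cuda')", skip_cuda_tests=True) == ""
    return processed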
class HfDocTestParser(doctest.DocTestParser):
"""
Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.
    Tests involving CUDA are skipped based on a naive pattern that should be updated if it is not sufficient.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
# fmt: off
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:(?!```).)* # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
(?:\n|$) # Match a new line or end of string
)*)
''', re.MULTILINE | re.VERBOSE
)
# fmt: on
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
def parse(self, string, name="<string>"):
"""
Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
calling `super().parse`
"""
string = preprocess_string(string, self.skip_cuda_tests)
return super().parse(string, name)
class HfDoctestModule(Module):
"""
Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
tests.
"""
def collect(self) -> Iterable[DoctestItem]:
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""Doctest code does not take into account `@property`, this
is a hackish way to fix it. https://bugs.python.org/issue17446
Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
reported upstream. #8796
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
if hasattr(obj, "__wrapped__"):
# Get the main obj in case of it being wrapped
obj = inspect.unwrap(obj)
# Type ignored because this is a private function.
return super()._find_lineno( # type:ignore[misc]
obj,
source_lines,
)
def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
super()._find( # type:ignore[misc]
tests, obj, name, module, source_lines, globs, seen
)
if self.path.name == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(
self.path,
root=self.config.rootpath,
mode=self.config.getoption("importmode"),
)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
skip("unable to import module %r" % self.path)
else:
raise
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
finder = MockAwareDocTestFinder(parser=HfDocTestParser())
# !!!!!!!!!!! HF Specific !!!!!!!!!!!
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests and cuda
yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)
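# A hedged sketch of one way to route pytest's doctest collection through the classes
# above from a `conftest.py`; the actual project wiring may differ, and the monkeypatching
# must run before doctest items are collected to have any effect.
def example_enable_hf_doctests():
    import _pytest.doctest

    doctest.DocTestParser = HfDocTestParser
    _pytest.doctest.DoctestModule = HfDoctestModule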
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
    # Some device-agnostic entries map to `None` (a no-op); guard against `None` here
    # instead of at the user level.
if fn is None:
return None
return fn(*args, **kwargs)
if is_torch_available():
# Mappings from device names to callable functions to support device agnostic
# testing.
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
BACKEND_MANUAL_SEED = {"default": None}
BACKEND_EMPTY_CACHE = {"default": None}
BACKEND_DEVICE_COUNT = {"default": lambda: 0}
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
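# A hedged sketch of calling the device-agnostic helpers from a test, assuming torch is
# available so the "cuda"/"cpu" entries above exist; unknown device names fall back to
# the "default" entry.
def example_backend_helpers(device: str = "cpu"):
    backend_manual_seed(device, 42)      # seeds the RNG for the given backend
    backend_empty_cache(device)          # a no-op for backends whose entry is None
    return backend_device_count(device)  # 0 for "cpu", the GPU count for "cuda"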
if is_torch_available():
# If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
# into device to function mappings.
if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(
f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}"
)
# Try to strip extension for later import – also verifies we are importing a
# python file.
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(f"Provided device spec file was not a Python file! Received '{device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
# Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError as e:
raise AttributeError("Device spec file did not contain `DEVICE_NAME`") from e
if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
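# A hedged sketch of a hypothetical device spec file (say `spec_custom.py`) that the block
# above would load when `TRANSFORMERS_TEST_DEVICE_SPEC=spec_custom.py` is set. Only the
# attribute names (`DEVICE_NAME`, `MANUAL_SEED_FN`, `EMPTY_CACHE_FN`, `DEVICE_COUNT_FN`)
# are dictated by the code above; the device name and function bodies are placeholders.
#
#   # spec_custom.py (hypothetical)
#   import torch
#
#   DEVICE_NAME = "custom"  # must match TRANSFORMERS_TEST_DEVICE if that is also set
#
#   def custom_manual_seed(seed):
#       torch.manual_seed(seed)  # a real spec would also seed the accelerator's RNG
#
#   def custom_empty_cache():
#       pass  # a real spec would release cached accelerator memory here
#
#   MANUAL_SEED_FN = custom_manual_seed
#   EMPTY_CACHE_FN = custom_empty_cache
#   DEVICE_COUNT_FN = lambda: 1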
| 2024-09-07 07:42:54.085210: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-09-07 07:42:54.099468: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
2024-09-07 07:42:54.115633: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
2024-09-07 07:42:54.120493: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-09-07 07:42:54.133382: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-09-07 07:42:55.254641: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
ImportError while loading conftest '/workspace/test_repo/conftest.py'.
conftest.py:26: in <module>
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
src/transformers/testing_utils.py:54: in <module>
from .utils import (
E ImportError: cannot import name 'GGUF_MIN_VERSION' from 'transformers.utils' (/workspace/test_repo/src/transformers/utils/__init__.py)
gold_summary: {'total': 17, 'passed': 6, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 3, 'warning': 8, 'error': 0}
original_summary: {'total': 0, 'passed': 0, 'xpassed': 0, 'failed': 0, 'xfailed': 0, 'deselected': 0, 'skipped': 0, 'warning': 0, 'error': 0}
pyfile_path: ./test_repo/src/transformers/testing_utils.py
unittest_path: ./test_repo/examples/tensorflow/test_tensorflow_examples.py