import logging

# Module-level logger, named after this module (standard logging convention).
logger = logging.getLogger(__name__)

# Closed set of supported tasks, used for up-front validation.
_VALID_TASKS = ("detection", "segmentation", "depth")


def get_model(task: str, model_key: str, device: str = "cpu"):
    """
    Dynamically retrieves the model instance based on the task and model_key.

    Args:
        task (str): One of "detection", "segmentation", or "depth".
        model_key (str): Model identifier or variant.
        device (str): Device to run inference on ("cpu" or "cuda").

    Returns:
        object: Initialized model ready for inference.

    Raises:
        ValueError: If task is unsupported.
        Exception: Re-raised if model loading fails for any other reason.
    """
    # Lazy %-style args: the message is only formatted if the record is emitted.
    logger.info(
        "Request received to load model '%s' for task '%s' on device '%s'",
        model_key, task, device,
    )

    # Validate the task BEFORE the try block so the ValueError is logged
    # exactly once; previously it was raised inside the try, re-caught by
    # the broad `except Exception`, and logged a second time.
    if task not in _VALID_TASKS:
        error_msg = (
            f"Unsupported task '{task}'. "
            "Valid options are: 'detection', 'segmentation', 'depth'."
        )
        logger.error(error_msg)
        raise ValueError(error_msg)

    try:
        # Imports are deferred so each task's dependencies are only loaded
        # when that task is actually requested.
        if task == "detection":
            from models.detection.detector import ObjectDetector
            return ObjectDetector(model_key=model_key, device=device)
        if task == "segmentation":
            from models.segmentation.segmenter import Segmenter
            return Segmenter(model_key=model_key, device=device)
        # task == "depth" (only remaining valid option after validation)
        from models.depth.depth_estimator import DepthEstimator
        return DepthEstimator(model_key=model_key, device=device)
    except Exception:
        # logger.exception records the full traceback along with the message,
        # which logger.error(f"... {e}") did not.
        logger.exception(
            "Error while loading model '%s' for task '%s'", model_key, task
        )
        raise