File size: 2,661 Bytes
c7e8396 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 |
import base64
from abc import ABC
from typing import Any, Dict, Optional, Union
from huggingface_hub.hf_api import InferenceProviderMapping
from huggingface_hub.inference._common import RequestParameters, _as_dict
from huggingface_hub.inference._providers._common import (
BaseConversationalTask,
BaseTextGenerationTask,
TaskProviderHelper,
filter_none,
)
_PROVIDER = "together"
_BASE_URL = "https://api.together.xyz"
class TogetherTask(TaskProviderHelper, ABC):
    """Base class for Together API tasks."""

    # Each supported task maps to a dedicated Together REST route.
    _TASK_ROUTES = {
        "text-to-image": "/v1/images/generations",
        "conversational": "/v1/chat/completions",
        "text-generation": "/v1/completions",
    }

    def __init__(self, task: str):
        super().__init__(provider=_PROVIDER, base_url=_BASE_URL, task=task)

    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
        """Return the endpoint path for this helper's task.

        Raises:
            ValueError: if the task has no Together route.
        """
        route = self._TASK_ROUTES.get(self.task)
        if route is None:
            raise ValueError(f"Unsupported task '{self.task}' for Together API.")
        return route
class TogetherTextGenerationTask(BaseTextGenerationTask):
    """`text-generation` task served by Together's `/v1/completions` endpoint."""

    def __init__(self):
        super().__init__(base_url=_BASE_URL, provider=_PROVIDER)

    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
        """Reshape Together's completion response into the shared text-generation format.

        Only the first choice is surfaced; `finish_reason` and `seed` are
        optional in Together's payload, so missing keys become `None`.
        """
        first_choice = _as_dict(response)["choices"][0]
        details = {
            "finish_reason": first_choice.get("finish_reason"),
            "seed": first_choice.get("seed"),
        }
        return {"generated_text": first_choice["text"], "details": details}
class TogetherConversationalTask(BaseConversationalTask):
    """`conversational` task, routed by the base class to Together's chat-completions API."""

    def __init__(self):
        super().__init__(base_url=_BASE_URL, provider=_PROVIDER)
class TogetherTextToImageTask(TogetherTask):
    """`text-to-image` task served by Together's `/v1/images/generations` endpoint."""

    # HF task-API parameter names that Together spells differently.
    _PARAM_ALIASES = {
        "num_inference_steps": "steps",
        "guidance_scale": "guidance",
    }

    def __init__(self):
        super().__init__("text-to-image")

    def _prepare_payload_as_dict(
        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
    ) -> Optional[Dict]:
        """Build the request payload, translating HF parameter names to Together's."""
        payload = dict(filter_none(parameters))
        for hf_name, together_name in self._PARAM_ALIASES.items():
            if hf_name in payload:
                payload[together_name] = payload.pop(hf_name)
        return {
            "prompt": inputs,
            "response_format": "base64",
            **payload,
            "model": provider_mapping_info.provider_id,
        }

    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
        """Return the raw image bytes decoded from the first base64 `data` entry."""
        b64_image = _as_dict(response)["data"][0]["b64_json"]
        return base64.b64decode(b64_image)
|