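"""Gradio chat app for a hotel booking and information assistant.

Streams completions from an OpenAI-compatible endpoint and dispatches the
model's tool calls to the local `tools` registry in a recursive loop.
"""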
import json
import os
import re

import gradio as gr
from dotenv import load_dotenv
from gradio import ChatMessage
from openai import OpenAI

from tools import tools, oitools
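# Note: `tools` is assumed to map tool names to objects exposing an
# .invoke(input=dict) method (LangChain-style), and `oitools` to hold the
# matching OpenAI-format tool schemas; both come from the local tools module.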

load_dotenv(".env")
HF_TOKEN = os.environ.get("HF_TOKEN")  # API token for the inference endpoint
BASE_URL = os.environ.get("BASE_URL")  # base URL of the OpenAI-compatible server

SYSTEM_PROMPT_TEMPLATE = """You are an AI assistant for a hotel booking and information system. Your role is to provide detailed and accurate information about the hotel, including available accommodations, facilities, dining options, and reservation services. You can make, modify, or cancel reservations and answer general inquiries about the hotel.

Keep your responses clear, concise, and relevant to ensure a seamless user experience.
Always respond in the same **language as the user's query** to preserve their preferred language."""

# OpenAI-compatible client pointed at the configured inference server.
client = OpenAI(
    base_url=f"{BASE_URL}/v1",
    api_key=HF_TOKEN,
)

def clean_json_string(json_str):
    """Repair tool-call JSON that may arrive truncated from the stream:
    strip trailing whitespace, commas, and braces, then re-close the object."""
    return re.sub(r'[ ,}\s]+$', '', json_str) + '}'
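# e.g. clean_json_string('{"guests": 2, ')  ->  '{"guests": 2}'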

def completion(history, model, system_prompt, tools=None):
    """Convert the Gradio history into chat messages and request a streamed completion."""
    messages = [{"role": "system", "content": system_prompt}]
    for msg in history:
        if isinstance(msg, dict):
            msg = ChatMessage(**msg)
        if msg.role == "assistant" and hasattr(msg, "metadata") and msg.metadata:
            # Tool calls were serialized into the metadata title by llm_in_loop;
            # replay them as an assistant tool-call turn plus the tool result.
            tool_calls = json.loads(msg.metadata.get("title", "[]"))
            messages.append({"role": "assistant", "tool_calls": tool_calls})
            messages.append({"role": "tool", "content": msg.content})
        else:
            messages.append({"role": msg.role, "content": msg.content})
    
    request_params = {
        "model": model,
        "messages": messages,
        "stream": True,
        "max_tokens": 1000,
        "temperature": 0.4,
        "frequency_penalty": 1,
        "extra_body": {"repetition_penalty": 1.1},
    }
    if tools:
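        # Let the model decide for itself when to call a tool.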
        request_params.update({"tool_choice": "auto", "tools": tools})
    
    return client.chat.completions.create(**request_params)  

def llm_in_loop(history, system_prompt, recursive):
    """Run one model turn; recurse as long as the model keeps calling tools."""
    try:
        # Use the first model served by the endpoint, with a generic fallback.
        models = client.models.list()
        model = models.data[0].id if models.data else "gpt-3.5-turbo"
    except Exception as err:
        gr.Warning("The model is initializing. Please wait; this may take 5 to 10 minutes ⏳.", duration=20)
        raise err
    
    arguments = ""
    name = ""
    chat_completion = completion(history=history, tools=oitools, model=model, system_prompt=system_prompt)  
    appended = False
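    # Stream the response: accumulate tool-call name/argument fragments,
    # and append plain content tokens to the chat as they arrive.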
    for chunk in chat_completion:
        if chunk.choices and chunk.choices[0].delta.tool_calls:
            call = chunk.choices[0].delta.tool_calls[0]
            if hasattr(call.function, "name") and call.function.name:
                name = call.function.name
            if hasattr(call.function, "arguments") and call.function.arguments:
                arguments += call.function.arguments
        elif chunk.choices and chunk.choices[0].delta.content:
            if not appended:
                history.append(ChatMessage(role="assistant", content=""))
                appended = True
            history[-1].content += chunk.choices[0].delta.content
            yield history[recursive:]
    
    arguments = json.loads(clean_json_string(arguments) if arguments else "{}")
    if appended:
        recursive -= 1
    if name:
        # Dispatch the requested tool; report unknown tool names instead of crashing.
        result = f"💥 Error using tool {name}, tool doesn't exist" if name not in tools else str(tools[name].invoke(input=arguments))
        result = json.dumps({name: result}, ensure_ascii=False)
        # Serialize the tool call into the message metadata so completion()
        # can reconstruct the assistant/tool turns on the next pass.
        history.append(ChatMessage(role="assistant", content=result, metadata={"title": json.dumps([{"id": "call_id", "function": {"arguments": json.dumps(arguments), "name": name}, "type": "function"}])}))
        yield history[recursive:]
        yield from llm_in_loop(history, system_prompt, recursive - 1)

def respond(message, history, additional_inputs):  
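    """Gradio callback: append the user's message and stream assistant updates."""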
    history.append(ChatMessage(role="user", content=message))
    yield from llm_in_loop(history, additional_inputs, -1)

if __name__ == "__main__":
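    # Expose the system prompt as an editable additional input in the UI.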
    system_prompt = gr.Textbox(label="System prompt", value=SYSTEM_PROMPT_TEMPLATE, lines=3)  
    demo = gr.ChatInterface(respond, type="messages", additional_inputs=[system_prompt])
    demo.launch()