from openai import OpenAI  # type: ignore
import os
def call_openai(
    user_prompt: str,
    chat_history: list[tuple[str, str]],
    system_prompt: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
    file_upload=None,
    image_upload=None
):
    # Placeholder stubs: file and image uploads are not handled yet
    if file_upload is None:
        pass
    if image_upload is None:
        pass
    # Start the message list with the system prompt
    messages = [{"role": "system", "content": system_prompt}]
    # Replay the prior chat history
for user_chat, assistant_chat in chat_history:
if user_chat:
messages.append({"role": "user", "content": user_chat})
if assistant_chat:
messages.append({"role": "assistant", "content": assistant_chat})
    # Append the current user prompt
    messages.append({"role": "user", "content": user_prompt})
    print("## Messages:\n", messages)  # debug output
    # Call the OpenAI Responses API
response = OpenAI().responses.create(
model="gpt-4.1-nano",
input=messages,
temperature=temperature,
top_p=top_p,
max_output_tokens=max_tokens
)
    # Extract the generated text
    response = response.output_text
    print("## Response:", response)  # debug output
    print("\n")
    yield response  # chat reply
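
# A minimal usage sketch for call_openai (assumes OPENAI_API_KEY is set in the
# environment; the prompts and sampling values below are illustrative only):
#
#   for reply in call_openai(
#       user_prompt="What is the capital of France?",
#       chat_history=[("Hi", "Hello! How can I help you?")],
#       system_prompt="You are a helpful assistant.",
#       max_tokens=256,
#       temperature=0.7,
#       top_p=0.9,
#   ):
#       print(reply)
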
def call_deepseek(
user_prompt: str,
chat_history: list[tuple[str, str]],
system_prompt: str,
max_tokens: int,
temperature: float,
top_p: float,
file_upload=None,
image_upload=None
):
    """
    Call DeepSeek Chat via its OpenAI-compatible API (non-streaming).
    - file_upload and image_upload are optional (None to skip handling).
    Returns:
        reply (str): the content generated by the model.
    """
    deepseek = OpenAI(api_key=os.environ["DEEPSEEK_API_KEY"], base_url="https://api.deepseek.com")
    # 1. Placeholder stubs: file and image uploads are not handled yet
    if file_upload is None:
        pass
    if image_upload is None:
        pass
    # 2. Build the messages list: system prompt first, then the chat history
messages = [{"role": "system", "content": system_prompt}]
for user_msg, ai_msg in chat_history:
if user_msg:
messages.append({"role": "user", "content": user_msg})
if ai_msg:
messages.append({"role": "assistant", "content": ai_msg})
    # Append the current user prompt
messages.append({"role": "user", "content": user_prompt})
    # 3. Call the DeepSeek Chat API (OpenAI-compatible)
    response = deepseek.chat.completions.create(
        model="deepseek-chat",  # or whichever model you configure
messages=messages,
temperature=temperature,
top_p=top_p,
max_tokens=max_tokens
)
    # 4. Extract the returned reply
reply = response.choices[0].message.content
return reply
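
# A minimal usage sketch for call_deepseek (assumes DEEPSEEK_API_KEY is set in
# the environment; the arguments below are illustrative only):
#
#   reply = call_deepseek(
#       user_prompt="Summarize the Pythagorean theorem in one sentence.",
#       chat_history=[],
#       system_prompt="You are a concise math tutor.",
#       max_tokens=256,
#       temperature=0.7,
#       top_p=0.9,
#   )
#   print(reply)
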
"""
Không có billing nên không xài được replicate
"""
# import replicate
# def deepseek_api_replicate(
#         user_prompt,
#         history: list[tuple[str, str]],
#         system_prompt,
#         max_new_tokens,
#         temperature,
#         top_p):
#     """
#     Call DeepSeek Math on Replicate and return the result directly.
#     Returns:
#         str or [bytes]: the output generated by the model
#     """
#     # 1. Initialize the client and authenticate
#     token = os.getenv("REPLICATE_API_TOKEN")
#     if not token:
#         raise RuntimeError("Missing REPLICATE_API_TOKEN")  # keep the secret in an environment variable
#     client = replicate.Client(api_token=token)
#     # 2. Run the model
#     output = client.run(
#         "deepseek-ai/deepseek-math-7b-base:61f572dae0985541cdaeb4a114fd5d2d16cb40dac3894da10558992fc60547c7",
#         input={
#             "system_prompt": system_prompt,
#             "user_prompt": user_prompt,
#             "max_new_tokens": max_new_tokens,
#             "temperature": temperature,
#             "top_p": top_p
#         }
#     )
#     # 3. Return the result
#     return output