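"""Gradio chat app: a simple assistant backed by NVIDIA's Nemotron 70B model.

Streams chat completions from NVIDIA's OpenAI-compatible API endpoint and
serves them through a gr.ChatInterface.
"""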
import os

import gradio as gr
from openai import OpenAI

# Read the API key from the environment instead of hard-coding a secret in
# source. (The variable name NVIDIA_API_KEY is a convention chosen here.)
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.environ.get("NVIDIA_API_KEY"),
)
system_prompt = {"role": "system", "content": "You are a helpful assistant to answer user queries."}

def get_text_response(user_message, history=None):
    if history is None:
        history = []
    # Prepare messages for the OpenAI API in the correct format:
    # system prompt first, then alternating user/assistant turns.
    messages = [system_prompt]
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": user_message})

    response = ""
    completion = client.chat.completions.create(
        model="nvidia/llama-3.1-nemotron-70b-instruct",
        messages=messages,
        temperature=0.5,
        top_p=1,
        max_tokens=100,
        stream=True,
    )
    # Accumulate the streamed chunks into the full reply.
    for chunk in completion:
        delta = chunk.choices[0].delta
        if delta and delta.content:
            response += delta.content
    # gr.ChatInterface manages the conversation history itself; the function
    # must return only the assistant's reply, not (history, history).
    return response
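
# Note: since stream=True, the function could instead `yield response` inside
# the loop above; gr.ChatInterface renders yielded partial strings as a
# live-updating reply. Returning the final string keeps this version simple.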

demo = gr.ChatInterface(
    fn=get_text_response,
    title="🧠 Nemotron 70B Assistant",
    theme="soft",
    examples=["How are you doing?", "What are your interests?", "Which places do you like to visit?"],
)

if __name__ == "__main__":
    demo.queue().launch(share=True, debug=True)
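
# To run locally (assumes the NVIDIA_API_KEY variable name used above):
#   export NVIDIA_API_KEY="nvapi-..."
#   python app.py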