"""
PlanExe / app.py
Author: Simon Strandgaard

Invoking app.py now opens the app_text2plan gradio UI.
"""
import os
import subprocess
import gradio as gr
from huggingface_hub import InferenceClient
from src.prompt.prompt_catalog import PromptCatalog
from src.llm_factory import get_llm
from src.plan.app_text2plan import demo_text2plan
from llama_index.core.llms import ChatMessage
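
# The `if False:` blocks below are disabled smoke tests kept around for manual
# debugging; flip a guard to True to exercise one of them.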
# Disabled smoke test: stream a chat completion from an OpenRouter-hosted model.
if False:
    llm = get_llm("openrouter-paid-gemini-2.0-flash-001")
    messages = [
        ChatMessage(
            role="system", content="You are a pirate with a colorful personality"
        ),
        ChatMessage(role="user", content="What is your name"),
    ]
    resp = llm.stream_chat(messages)
    for r in resp:
        print(r.delta, end="")
# Disabled smoke test: load the prompt catalog from src and look up one prompt by id.
if False:
    prompt_catalog = PromptCatalog()
    prompt_catalog.load(os.path.join(os.path.dirname(__file__), 'src', 'plan', 'data', 'simple_plan_prompts.jsonl'))
    prompt_item = prompt_catalog.find("4dc34d55-0d0d-4e9d-92f4-23765f49dd29")
    print(prompt_item)
# Disabled smoke test: spawn a child process that prints the current working directory.
if False:
    result = subprocess.run(["pwd"], capture_output=True, text=True)
    print("Child process output:")
    print(result.stdout)
# Disabled smoke test: check that the filesystem is writable.
if False:
    # Write a file
    with open("output.txt", "w") as f:
        f.write("Hello from Hugging Face Spaces!")
    # Read the file back
    with open("output.txt", "r") as f:
        content = f.read()
    print("File content:", content)
"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Rebuild the full conversation: system prompt first, then the alternating
    # user/assistant turns from the chat history, then the new user message.
    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    # Stream the completion, yielding the accumulated text after every chunk.
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:  # the final chunk of a stream may carry no content
            response += token
        yield response
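
# `respond` is a generator: gr.ChatInterface re-renders the chat bubble with
# each yielded prefix, which produces the incremental "typing" effect.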
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)

if __name__ == "__main__":
    # Launch the PlanExe text2plan UI rather than the zephyr chat demo above.
    demo_text2plan.launch()