iruno committed on
Commit
93c3263
·
verified ·
1 Parent(s): 9e16171

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +50 -100
app.py CHANGED
@@ -1,114 +1,64 @@
1
- import os
2
- import logging
3
  import gradio as gr
4
- import openai
5
- import multiprocessing
6
 
7
- from process_run import process_run
 
 
 
8
 
9
- # Configure logging
10
- logging.basicConfig(
11
- level=logging.INFO,
12
- format='%(asctime)s - %(levelname)s - %(message)s',
13
- handlers=[
14
- logging.StreamHandler(),
15
- ]
16
- )
17
- logger = logging.getLogger(__name__)
18
- logger.setLevel('INFO')
19
-
20
- # Set your OpenAI API key
21
- openai.api_key = os.getenv("OPENAI_API_KEY")
22
 
 
 
 
 
 
 
 
 
 
23
 
24
- # Example instructions to display
25
- EXAMPLES = [
26
- "When did the solar system form? Find on wikipedia.",
27
- "Find the rating of Monopoly (1935) on boardgamegeek.com",
28
- ]
29
 
30
- URL_EXAMPLES = [
31
- "about:blank",
32
- "https://www.wikipedia.org",
33
- "https://www.boardgamegeek.com"
34
- ]
35
 
36
- def main():
37
- logger.info("Starting BrowserGym web agent")
38
-
39
- with gr.Blocks(title="WebShephered Demo") as demo:
40
- # Add CSS for outlined groups
41
- gr.Markdown("# WebShephered Demo")
42
- with gr.Row():
43
- with gr.Column(scale=2):
44
- with gr.Column():
45
- instruction = gr.Textbox(
46
- label="Instruction",
47
- placeholder="Enter your instruction here",
48
- lines=2,
49
- )
50
- gr.Examples(
51
- examples=[[e] for e in EXAMPLES],
52
- inputs=instruction,
53
- cache_examples=False,
54
- )
55
-
56
- gr.Markdown("\n\n")
57
-
58
- with gr.Column():
59
- start_url = gr.Textbox(
60
- label="Starting URL",
61
- placeholder="URL to start the browser at",
62
- value="about:blank"
63
- )
64
- gr.Examples(
65
- examples=URL_EXAMPLES,
66
- inputs=start_url,
67
- cache_examples=False,
68
- )
69
 
70
- gr.Markdown("\n\n")
 
 
 
 
 
 
 
71
 
72
- model_name = gr.Dropdown(
73
- label="Agent Model",
74
- choices=["gpt-4o"],
75
- value="gpt-4o"
76
- )
77
- run_btn = gr.Button("Run Demo")
78
 
79
- gr.Markdown("---")
80
-
81
- with gr.Column():
82
- gr.Markdown("## Current State")
83
- state_view = gr.Markdown()
84
- browser_view = gr.Image(label="Browser View")
85
 
86
- gr.Markdown("### Task Checklist from WebShephered")
87
- checklist_view = gr.Markdown()
88
-
89
- gr.Markdown("### Action Selection in current step")
90
- with gr.Row() as rm_row:
91
- rm_cards_container = gr.HTML()
92
- with gr.Column(scale=2):
93
- gr.Markdown("## Trajectory")
94
- trajectory_container = gr.HTML() # Placeholder for our custom trajectory component
95
-
96
-
97
-
98
- run_btn.click(
99
- fn=process_run,
100
- inputs=[instruction, model_name, start_url],
101
- outputs=[state_view, browser_view, checklist_view, rm_cards_container, trajectory_container],
102
- api_name="run_agent",
103
- concurrency_limit=32,
104
- show_progress=True
105
- )
106
 
107
- logger.info("Launching Gradio interface")
108
- # Set max_threads to allow multiple concurrent requests
109
- demo.launch(share=True, max_threads=32)
110
 
111
  if __name__ == "__main__":
112
- # Add support for multiprocessing on Windows
113
- multiprocessing.freeze_support()
114
- main()
 
 
 
1
  import gradio as gr
2
+ from huggingface_hub import InferenceClient
 
3
 
4
+ """
5
+ For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
+ """
7
+ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
 
10
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for *message*, given the conversation history.

    Yields the accumulated response text after each streamed token so the
    Gradio ChatInterface can render it incrementally.

    Args:
        message: The latest user message.
        history: Prior (user, assistant) turn pairs; empty entries are skipped.
        system_message: System prompt placed first in the message list.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.
    """
    messages = [{"role": "system", "content": system_message}]

    # Rebuild the OpenAI-style message list from the history pairs,
    # skipping empty slots (e.g. a turn with no assistant reply yet).
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # Loop variable renamed from `message` — the original shadowed the
    # user-message parameter above.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content

        # `delta.content` may be None on the final streamed chunk; the
        # original `response += token` raised TypeError in that case.
        if token:
            response += token
            yield response
 
 
 
 
41
 
 
 
 
 
 
 
42
 
43
+ """
44
+ For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
+ """
46
+ demo = gr.ChatInterface(
47
+ respond,
48
+ additional_inputs=[
49
+ gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
50
+ gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
51
+ gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
52
+ gr.Slider(
53
+ minimum=0.1,
54
+ maximum=1.0,
55
+ value=0.95,
56
+ step=0.05,
57
+ label="Top-p (nucleus sampling)",
58
+ ),
59
+ ],
60
+ )
 
 
61
 
 
 
 
62
 
63
if __name__ == "__main__":
    # Start the Gradio server when executed as a script.
    demo.launch()