PKU-ML committed · verified
Commit 328f5b7 · 1 Parent(s): c76a7c9

Update app.py

Files changed (1):
  1. app.py +13 -93
app.py CHANGED
@@ -1,98 +1,18 @@
-import os
-os.system('pip install dashscope')
 import gradio as gr
-from http import HTTPStatus
-import dashscope
-from dashscope import Generation
-from dashscope.api_entities.dashscope_response import Role
-from typing import List, Optional, Tuple, Dict
-from urllib.error import HTTPError
-default_system = 'You are a helpful assistant.'
+from transformers import pipeline
 
-YOUR_API_TOKEN = os.getenv('YOUR_API_TOKEN')
-dashscope.api_key = YOUR_API_TOKEN
+pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
 
-History = List[Tuple[str, str]]
-Messages = List[Dict[str, str]]
+def predict(input_img):
+    predictions = pipeline(input_img)
+    return input_img, {p["label"]: p["score"] for p in predictions}
 
-def clear_session() -> History:
-    return '', []
+gradio_app = gr.Interface(
+    predict,
+    inputs=gr.Image(label="Select hot dog candidate", sources=['upload', 'webcam'], type="pil"),
+    outputs=[gr.Image(label="Processed Image"), gr.Label(label="Result", num_top_classes=2)],
+    title="Hot Dog? Or Not?",
+)
 
-def modify_system_session(system: str) -> str:
-    if system is None or len(system) == 0:
-        system = default_system
-    return system, system, []
-
-def history_to_messages(history: History, system: str) -> Messages:
-    messages = [{'role': Role.SYSTEM, 'content': system}]
-    for h in history:
-        messages.append({'role': Role.USER, 'content': h[0]})
-        messages.append({'role': Role.ASSISTANT, 'content': h[1]})
-    return messages
-
-
-def messages_to_history(messages: Messages) -> Tuple[str, History]:
-    assert messages[0]['role'] == Role.SYSTEM
-    system = messages[0]['content']
-    history = []
-    for q, r in zip(messages[1::2], messages[2::2]):
-        history.append([q['content'], r['content']])
-    return system, history
-
-
-def model_chat(query: Optional[str], history: Optional[History], system: str
-               ) -> Tuple[str, str, History]:
-    if query is None:
-        query = ''
-    if history is None:
-        history = []
-    messages = history_to_messages(history, system)
-    messages.append({'role': Role.USER, 'content': query})
-    gen = Generation.call(
-        model = "G1-7B",
-        messages=messages,
-        result_format='message',
-        stream=True
-    )
-    for response in gen:
-        if response.status_code == HTTPStatus.OK:
-            role = response.output.choices[0].message.role
-            response = response.output.choices[0].message.content
-            system, history = messages_to_history(messages + [{'role': role, 'content': response}])
-            yield '', history, system
-        else:
-            raise HTTPError('Request id: %s, Status code: %s, error code: %s, error message: %s' % (
-                response.request_id, response.status_code,
-                response.code, response.message
-            ))
-
-
-with gr.Blocks() as demo:
-    gr.Markdown("""<p align="center"><img src="https://modelscope.cn/api/v1/models/qwen/Qwen-VL-Chat/repo?Revision=master&FilePath=assets/logo.jpg&View=true" style="height: 80px"/><p>""")
-    gr.Markdown("""<center><font size=8>Qwen-1.8B-Chat Bot👾</center>""")
-    gr.Markdown("""<center><font size=4>通义千问-1.8B(Qwen-1.8B) 是阿里云研发的通义千问大模型系列的18亿参数规模的模型。</center>""")
-
-    with gr.Row():
-        with gr.Column(scale=3):
-            system_input = gr.Textbox(value=default_system, lines=1, label='System')
-        with gr.Column(scale=1):
-            modify_system = gr.Button("🛠️ 设置system并清除历史对话", scale=2)
-        system_state = gr.Textbox(value=default_system, visible=False)
-    chatbot = gr.Chatbot(label='G1-7B')
-    textbox = gr.Textbox(lines=2, label='Input')
-
-    with gr.Row():
-        clear_history = gr.Button("🧹 清除历史对话")
-        sumbit = gr.Button("🚀 发送")
-
-    sumbit.click(model_chat,
-                 inputs=[textbox, chatbot, system_state],
-                 outputs=[textbox, chatbot, system_input])
-    clear_history.click(fn=clear_session,
-                        inputs=[],
-                        outputs=[textbox, chatbot])
-    modify_system.click(fn=modify_system_session,
-                        inputs=[system_input],
-                        outputs=[system_state, system_input, chatbot])
-
-demo.queue(api_open=False).launch(max_threads=10,height=800, share=False)
+if __name__ == "__main__":
+    gradio_app.launch()
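
For anyone wanting to sanity-check the new classifier outside the Gradio UI, here is a minimal smoke test; it is a sketch, not part of the commit. It assumes transformers, torch, and pillow are installed locally (on a Space these would normally be declared in requirements.txt, rather than installed at runtime the way the removed code shelled out to pip for dashscope). Note that the new app.py rebinds the imported name pipeline to the pipeline object it creates; that works because the factory is only called once, but the sketch uses a distinct name, classifier, to avoid the shadowing.

# Minimal smoke test for the new classifier (sketch, not part of the commit).
# Assumes: pip install transformers torch pillow
from PIL import Image
from transformers import pipeline

classifier = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")

# Stand-in solid-color image; use a real photo for a meaningful prediction.
img = Image.new("RGB", (224, 224), color=(200, 80, 40))

# The pipeline accepts a PIL image and returns a list of
# {"label": ..., "score": ...} dicts, which predict() in app.py
# reshapes into the {label: score} mapping that gr.Label expects.
predictions = classifier(img)
print({p["label"]: p["score"] for p in predictions})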