merterbak committed
Commit 28d4ff3 · verified · 1 Parent(s): 1616ee6
Files changed (1):
  1. app.py +40 -40
app.py CHANGED
@@ -1,5 +1,4 @@
 from transformers import pipeline, TextIteratorStreamer
-import torch
 from threading import Thread
 import gradio as gr
 import spaces
@@ -10,8 +9,29 @@ from openai_harmony import (
     Role,
     Message,
     Conversation,
+    SystemContent,
+    DeveloperContent,
+    ReasoningEffort,
 )
 
+# Compiled regexes for parsing the system prompt and the streamed output
+RE_REASONING = re.compile(r'(?i)Reasoning:\s*(low|medium|high)')
+RE_FINAL_MARKER = re.compile(r'(?i)assistantfinal')
+RE_ANALYSIS_PREFIX = re.compile(r'(?i)^analysis\s*')
+
+# OpenAI suggests parsing the reasoning level out of the system prompt, so handle that here
+def parse_reasoning_and_instructions(system_prompt: str):
+    instructions = system_prompt or "You are a helpful assistant."
+    match = RE_REASONING.search(instructions)
+    effort_key = match.group(1).lower() if match else 'medium'
+    effort = {
+        'low': ReasoningEffort.LOW,
+        'medium': ReasoningEffort.MEDIUM,
+        'high': ReasoningEffort.HIGH,
+    }.get(effort_key, ReasoningEffort.MEDIUM)
+    cleaned_instructions = RE_REASONING.sub('', instructions).strip()
+    return effort, cleaned_instructions
+
 model_id = "openai/gpt-oss-20b"
 
 pipe = pipeline(
@@ -31,42 +51,23 @@ def format_conversation_history(chat_history):
         content = content[0]["text"] if content and "text" in content[0] else str(content)
         messages.append({"role": role, "content": content})
     return messages
-#OpenAI's harmony format
-def build_harmony_conversation_from_messages(messages):
-    harmony_messages = []
-    for m in messages:
-        role = m["role"].lower()
-        content = m["content"]
-        if role == "system":
-            harmony_messages.append(
-                Message.from_role_and_content(
-                    Role.SYSTEM,
-                    content,
-                )
-            )
-        elif role == "user":
-            harmony_messages.append(
-                Message.from_role_and_content(
-                    Role.USER,
-                    content,
-                )
-            )
-        elif role == "assistant":
-            harmony_messages.append(
-                Message.from_role_and_content(
-                    Role.ASSISTANT,
-                    content,
-                )
-            )
-    return Conversation.from_messages(harmony_messages)
-
+
 @spaces.GPU()
 def generate_response(input_data, chat_history, max_new_tokens, system_prompt, temperature, top_p, top_k, repetition_penalty):
     new_message = {"role": "user", "content": input_data}
-    system_message = [{"role": "system", "content": system_prompt}] if system_prompt else []
     processed_history = format_conversation_history(chat_history)
-    messages = system_message + processed_history + [new_message]
-    conversation = build_harmony_conversation_from_messages(messages)
+    effort, instructions = parse_reasoning_and_instructions(system_prompt)
+    system_content = SystemContent.new().with_reasoning_effort(effort)
+    developer_content = DeveloperContent.new().with_instructions(instructions)
+    harmony_messages = [
+        Message.from_role_and_content(Role.SYSTEM, system_content),
+        Message.from_role_and_content(Role.DEVELOPER, developer_content),
+    ]
+
+    for m in processed_history + [new_message]:
+        role = Role.USER if m["role"] == "user" else Role.ASSISTANT
+        harmony_messages.append(Message.from_role_and_content(role, m["content"]))
+    conversation = Conversation.from_messages(harmony_messages)
     prompt_tokens = enc.render_conversation_for_completion(conversation, Role.ASSISTANT)
     prompt_text = pipe.tokenizer.decode(prompt_tokens, skip_special_tokens=False)
 
@@ -85,21 +86,20 @@ def generate_response(input_data, chat_history, max_new_tokens, system_prompt, t
     thread = Thread(target=pipe, args=(prompt_text,), kwargs=generation_kwargs)
     thread.start()
 
+    # Split the streamed output into thinking and final channels
     thinking = ""
     final = ""
     started_final = False
     for chunk in streamer:
         if not started_final:
-            if "assistantfinal" in chunk.lower():
-                split_parts = re.split(r'(?i)assistantfinal', chunk, maxsplit=1)
-                thinking += split_parts[0]
-                final += split_parts[1]
+            parts = RE_FINAL_MARKER.split(chunk, maxsplit=1)
+            thinking += parts[0]
+            if len(parts) > 1:
+                final += parts[-1]
                 started_final = True
-            else:
-                thinking += chunk
         else:
             final += chunk
-        clean_thinking = re.sub(r'^analysis\s*', '', thinking, flags=re.I).strip()
+        clean_thinking = RE_ANALYSIS_PREFIX.sub('', thinking).strip()
         clean_final = final.strip()
         formatted = f"<details open><summary>Click to view Thinking Process</summary>\n\n{clean_thinking}\n\n</details>\n\n{clean_final}"
         yield formatted
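
For quick review, a standalone sketch of what the new system-prompt parsing should do. It mirrors the regex and effort mapping added above (a stand-in, not imported from the Space), assuming openai-harmony is installed:

import re
from openai_harmony import ReasoningEffort  # same import surface as the commit

RE_REASONING = re.compile(r'(?i)Reasoning:\s*(low|medium|high)')

def parse_reasoning_and_instructions(system_prompt: str):
    # Empty prompt falls back to a default instruction, as in app.py
    instructions = system_prompt or "You are a helpful assistant."
    match = RE_REASONING.search(instructions)
    effort_key = match.group(1).lower() if match else 'medium'
    effort = {
        'low': ReasoningEffort.LOW,
        'medium': ReasoningEffort.MEDIUM,
        'high': ReasoningEffort.HIGH,
    }.get(effort_key, ReasoningEffort.MEDIUM)
    # Strip the directive so it does not leak into the instructions
    return effort, RE_REASONING.sub('', instructions).strip()

effort, instructions = parse_reasoning_and_instructions("Reasoning: high\nBe concise.")
assert effort == ReasoningEffort.HIGH and instructions == "Be concise."

effort, instructions = parse_reasoning_and_instructions("")
assert effort == ReasoningEffort.MEDIUM  # no directive means the medium default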
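
For context, a minimal sketch of the harmony rendering path this commit switches to. The `load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)` call is an assumption about how `enc` is defined in the unchanged part of app.py:

from openai_harmony import (
    load_harmony_encoding,
    HarmonyEncodingName,
    Conversation,
    Message,
    Role,
    SystemContent,
    DeveloperContent,
    ReasoningEffort,
)

# Assumed to match the `enc` used by generate_response in app.py
enc = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)

convo = Conversation.from_messages([
    # The system turn carries the reasoning effort, the developer turn the instructions
    Message.from_role_and_content(Role.SYSTEM, SystemContent.new().with_reasoning_effort(ReasoningEffort.HIGH)),
    Message.from_role_and_content(Role.DEVELOPER, DeveloperContent.new().with_instructions("Be concise.")),
    Message.from_role_and_content(Role.USER, "Hi!"),
])

# Token ids that app.py decodes back into prompt text for the pipeline
tokens = enc.render_conversation_for_completion(convo, Role.ASSISTANT)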
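
Finally, a small simulation of the reworked streaming loop on fabricated chunks, showing how harmony's analysis channel gets separated from the final answer:

import re

RE_FINAL_MARKER = re.compile(r'(?i)assistantfinal')
RE_ANALYSIS_PREFIX = re.compile(r'(?i)^analysis\s*')

# Fabricated chunks standing in for TextIteratorStreamer output
fake_stream = ["analysis The user greets", " us.assistantfinal", "Hello! How can I help?"]

thinking, final, started_final = "", "", False
for chunk in fake_stream:
    if not started_final:
        parts = RE_FINAL_MARKER.split(chunk, maxsplit=1)
        thinking += parts[0]
        if len(parts) > 1:  # marker seen: the remainder belongs to the final answer
            final += parts[-1]
            started_final = True
    else:
        final += chunk

print(RE_ANALYSIS_PREFIX.sub('', thinking).strip())  # The user greets us.
print(final.strip())                                 # Hello! How can I help?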