ginipick committed on
Commit
9a48a94
·
verified ·
1 Parent(s): 75e05d9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +120 -49
app.py CHANGED
@@ -1,21 +1,23 @@
1
- # CHATGPT API둜 λ³€κ²½
2
-
3
  # ──────────────────────────────── Imports ────────────────────────────────
4
  import os, json, re, logging, requests, markdown, time, io
5
  from datetime import datetime
6
 
7
  import streamlit as st
8
- import anthropic
 
 
 
9
  from gradio_client import Client
10
  import pandas as pd
11
  import PyPDF2 # For handling PDF files
12
 
13
  # ──────────────────────────────── Environment Variables / Constants ─────────────────────────
14
- ANTHROPIC_KEY = os.getenv("API_KEY", "")
 
15
  BRAVE_KEY = os.getenv("SERPHOUSE_API_KEY", "") # Keep this name
16
  BRAVE_ENDPOINT = "https://api.search.brave.com/res/v1/web/search"
17
  IMAGE_API_URL = "http://211.233.58.201:7896"
18
- MAX_TOKENS = 7_999
19
 
20
  # Blog template and style definitions (in English)
21
  BLOG_TEMPLATES = {
@@ -45,10 +47,18 @@ EXAMPLE_TOPICS = {
45
  logging.basicConfig(level=logging.INFO,
46
  format="%(asctime)s - %(levelname)s - %(message)s")
47
 
48
- # ──────────────────────────────── Anthropic Client ────────────────────────
49
  @st.cache_resource
50
- def get_anthropic_client():
51
- return anthropic.Anthropic(api_key=ANTHROPIC_KEY)
 
 
 
 
 
 
 
 
52
 
53
  # ──────────────────────────────── Blog Creation System Prompt ─────────────
54
  def get_system_prompt(template="ginigen", tone="professional", word_count=1750, include_search_results=False, include_uploaded_files=False) -> str:
@@ -514,19 +524,55 @@ def extract_image_prompt(blog_text: str, topic: str):
514
  Analyze the blog content (blog_text) to generate a one-line English image prompt
515
  related to the topic.
516
  """
517
- client = get_anthropic_client()
518
- sys = f"Generate a single-line English image prompt from the following text:\nTopic: {topic}"
 
 
 
 
 
 
 
 
 
 
 
 
519
  try:
520
- # Simple one-time call
521
- res = client.messages.create(
522
- model="claude-3-7-sonnet-20250219",
523
- max_tokens=80,
524
- system=sys,
525
- messages=[{"role": "user", "content": blog_text}]
 
 
 
 
 
 
 
 
 
 
 
526
  )
527
- return res.content[0].text.strip()
528
- except Exception:
529
- # Fallback prompt
 
 
 
 
 
 
 
 
 
 
 
 
 
530
  return f"A professional photo related to {topic}, high quality"
531
 
532
  def md_to_html(md: str, title="Ginigen Blog"):
@@ -544,7 +590,8 @@ def ginigen_app():
544
 
545
  # Set default session state
546
  if "ai_model" not in st.session_state:
547
- st.session_state.ai_model = "claude-3-7-sonnet-20250219"
 
548
  if "messages" not in st.session_state:
549
  st.session_state.messages = []
550
  if "auto_save" not in st.session_state:
@@ -710,7 +757,7 @@ def process_example(topic):
710
  process_input(topic, [])
711
 
712
  def process_input(prompt: str, uploaded_files):
713
- # Add user's message if it doesn't already exist
714
  if not any(m["role"] == "user" and m["content"] == prompt for m in st.session_state.messages):
715
  st.session_state.messages.append({"role": "user", "content": prompt})
716
 
@@ -725,7 +772,7 @@ def process_input(prompt: str, uploaded_files):
725
  has_uploaded_files = bool(uploaded_files) and len(uploaded_files) > 0
726
 
727
  try:
728
- client = get_anthropic_client()
729
 
730
  # Prepare conversation messages
731
  messages = [{"role": m["role"], "content": m["content"]} for m in st.session_state.messages]
@@ -752,7 +799,7 @@ def process_input(prompt: str, uploaded_files):
752
  include_uploaded_files=has_uploaded_files
753
  )
754
 
755
- # If we have file content, append it to the system prompt
756
  if file_content:
757
  sys_prompt += (
758
  "\n\n"
@@ -761,7 +808,7 @@ def process_input(prompt: str, uploaded_files):
761
  "Ensure the file content is accurately reflected in the blog.\n"
762
  )
763
 
764
- # Append additional user message about file usage
765
  if has_uploaded_files:
766
  extra_user_msg = (
767
  f"{prompt}\n\n"
@@ -770,19 +817,53 @@ def process_input(prompt: str, uploaded_files):
770
  )
771
  messages.append({"role": "user", "content": extra_user_msg})
772
 
773
- # Claude streaming
774
- with client.messages.stream(
775
- model=st.session_state.ai_model,
776
- max_tokens=MAX_TOKENS,
777
- system=sys_prompt,
778
- messages=messages
779
- ) as stream:
780
- for t in stream.text_stream:
781
- answer += t or ""
782
- placeholder.markdown(answer + "β–Œ")
783
- placeholder.markdown(answer)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
784
 
785
- # Image generation option
 
 
 
786
  answer_entry_saved = False
787
  if st.session_state.generate_image:
788
  with st.spinner("Generating image..."):
@@ -797,8 +878,8 @@ def process_input(prompt: str, uploaded_files):
797
  "image_caption": cap
798
  })
799
  answer_entry_saved = True
800
-
801
- # Save the answer
802
  if not answer_entry_saved:
803
  st.session_state.messages.append({"role": "assistant", "content": answer})
804
 
@@ -827,16 +908,6 @@ def process_input(prompt: str, uploaded_files):
827
  except Exception as e:
828
  logging.error(f"Auto-save failed: {e}")
829
 
830
- except anthropic.BadRequestError as e:
831
- error_message = str(e)
832
- if "credit balance is too low" in error_message:
833
- placeholder.error("⚠️ Insufficient API credits: Please top up your Anthropic API account.")
834
- ans = "Unable to generate blog due to low API credits. Please recharge and try again."
835
- else:
836
- placeholder.error(f"API request error: {error_message}")
837
- ans = f"An error occurred while calling the API: {error_message}"
838
- st.session_state.messages.append({"role": "assistant", "content": ans})
839
-
840
  except Exception as e:
841
  error_message = str(e)
842
  placeholder.error(f"An error occurred: {error_message}")
@@ -848,4 +919,4 @@ def main():
848
  ginigen_app()
849
 
850
  if __name__ == "__main__":
851
- main()
 
 
 
1
  # ──────────────────────────────── Imports ────────────────────────────────
2
  import os, json, re, logging, requests, markdown, time, io
3
  from datetime import datetime
4
 
5
  import streamlit as st
6
+ # >>> Anthropic λΆ€λΆ„ μ‚­μ œ
7
+ # import anthropic
8
+ from openai import OpenAI # μ»€μŠ€ν…€ 래퍼(λ˜λŠ” 별도 라이브러리)라고 κ°€μ •
9
+
10
  from gradio_client import Client
11
  import pandas as pd
12
  import PyPDF2 # For handling PDF files
13
 
14
  # ──────────────────────────────── Environment Variables / Constants ─────────────────────────
15
+ # κΈ°μ‘΄ ANTHROPIC_KEY -> OPENAI_API_KEY 둜 λ³€κ²½
16
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
17
  BRAVE_KEY = os.getenv("SERPHOUSE_API_KEY", "") # Keep this name
18
  BRAVE_ENDPOINT = "https://api.search.brave.com/res/v1/web/search"
19
  IMAGE_API_URL = "http://211.233.58.201:7896"
20
+ MAX_TOKENS = 7999
21
 
22
  # Blog template and style definitions (in English)
23
  BLOG_TEMPLATES = {
 
47
  logging.basicConfig(level=logging.INFO,
48
  format="%(asctime)s - %(levelname)s - %(message)s")
49
 
# ──────────────────────────────── OpenAI Client ──────────────────────────
@st.cache_resource
def get_openai_client():
    """Create and cache a single OpenAI client for the app session.

    The `st.cache_resource` decorator ensures the client is built once and
    reused across Streamlit reruns.

    Returns:
        OpenAI: a client configured with the OPENAI_API_KEY env var.

    Raises:
        RuntimeError: if the OPENAI_API_KEY environment variable is unset.
    """
    if not OPENAI_API_KEY:
        raise RuntimeError("⚠️ OPENAI_API_KEY ν™˜κ²½ λ³€μˆ˜κ°€ μ„€μ •λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    return OpenAI(api_key=OPENAI_API_KEY)
 
63
  # ──────────────────────────────── Blog Creation System Prompt ─────────────
64
  def get_system_prompt(template="ginigen", tone="professional", word_count=1750, include_search_results=False, include_uploaded_files=False) -> str:
 
524
  Analyze the blog content (blog_text) to generate a one-line English image prompt
525
  related to the topic.
526
  """
527
+ # κΈ°μ‘΄ anthropic ν΄λΌμ΄μ–ΈνŠΈ μ‚¬μš© -> OpenAI 호좜둜 λŒ€μ²΄
528
+ client = get_openai_client()
529
+
530
+ system_msg = (
531
+ f"Generate a single-line English image prompt from the following text:\n"
532
+ f"Topic: {topic}\n\n"
533
+ f"---\n"
534
+ f"{blog_text}\n\n"
535
+ f"---\n"
536
+ f"Return only the prompt text, nothing else."
537
+ )
538
+
539
+ # μ‹€μ œλ‘œλŠ” openai APIλ₯Ό μ–΄λ–»κ²Œ ν˜ΈμΆœν•˜λŠλƒμ— 따라 λ‹¬λΌμ§‘λ‹ˆλ‹€.
540
+ # μ—¬κΈ°μ„œλŠ” μ˜ˆμ‹œλ‘œ client.responses.create()λ₯Ό μ‚¬μš©ν•œλ‹€κ³  κ°€μ •
541
  try:
542
+ response = client.responses.create(
543
+ model="gpt-4.1-mini",
544
+ input=[
545
+ {
546
+ "role": "system",
547
+ "content": [
548
+ {
549
+ "type": "input_text",
550
+ "text": system_msg
551
+ }
552
+ ]
553
+ }
554
+ ],
555
+ text={"format": {"type": "text"}},
556
+ temperature=1,
557
+ max_output_tokens=80,
558
+ top_p=1
559
  )
560
+ # μ˜ˆμ‹œ: response κ°μ²΄μ—μ„œ 첫 번째 좜λ ₯만 κ°€μ Έμ˜¨λ‹€κ³  κ°€μ •
561
+ # response κ΅¬μ‘°λŠ” μ‚¬μš© 쀑인 λž˜νΌμ— 따라 λ‹€λ¦…λ‹ˆλ‹€.
562
+ content = ""
563
+ if "responses" in dir(response):
564
+ # 가상 μ˜ˆμ‹œ: response.responses[0].content[0].text
565
+ # λ˜λŠ” response["choices"][0]["text"] ν˜•νƒœμΌ μˆ˜λ„ 있음
566
+ first_resp = response.responses[0] # κ°€μ •
567
+ # λ³Έ μ˜ˆμ‹œλŠ” "content" ν•„λ“œκ°€ list둜 있고, κ·Έ 쀑 [0]["text"]에 값이 μžˆλ‹€κ³  κ°€μ •
568
+ content = first_resp.content[0]["text"].strip()
569
+ else:
570
+ content = "A professional photo related to the topic, high quality"
571
+
572
+ return content
573
+
574
+ except Exception as e:
575
+ logging.error(f"OpenAI image prompt generation error: {e}")
576
  return f"A professional photo related to {topic}, high quality"
577
 
578
  def md_to_html(md: str, title="Ginigen Blog"):
 
590
 
591
  # Set default session state
592
  if "ai_model" not in st.session_state:
593
+ # κΈ°μ‘΄ anthropic λͺ¨λΈλͺ… λŒ€μ‹ , gpt-4.1-mini
594
+ st.session_state.ai_model = "gpt-4.1-mini"
595
  if "messages" not in st.session_state:
596
  st.session_state.messages = []
597
  if "auto_save" not in st.session_state:
 
757
  process_input(topic, [])
758
 
759
  def process_input(prompt: str, uploaded_files):
760
+ # Add user's message
761
  if not any(m["role"] == "user" and m["content"] == prompt for m in st.session_state.messages):
762
  st.session_state.messages.append({"role": "user", "content": prompt})
763
 
 
772
  has_uploaded_files = bool(uploaded_files) and len(uploaded_files) > 0
773
 
774
  try:
775
+ client = get_openai_client()
776
 
777
  # Prepare conversation messages
778
  messages = [{"role": m["role"], "content": m["content"]} for m in st.session_state.messages]
 
799
  include_uploaded_files=has_uploaded_files
800
  )
801
 
802
+ # 파일 λ‚΄μš©μ΄ μžˆλ‹€λ©΄ system prompt에 μΆ”κ°€
803
  if file_content:
804
  sys_prompt += (
805
  "\n\n"
 
808
  "Ensure the file content is accurately reflected in the blog.\n"
809
  )
810
 
811
+ # μ‚¬μš©μžκ°€ μž…λ ₯ν•œ prompt와 파일 μ°Έκ³  λ©”μ‹œμ§€
812
  if has_uploaded_files:
813
  extra_user_msg = (
814
  f"{prompt}\n\n"
 
817
  )
818
  messages.append({"role": "user", "content": extra_user_msg})
819
 
820
+ # 이제 OpenAI client둜 μš”μ²­μ„ λ³΄λƒ…λ‹ˆλ‹€.
821
+ with st.spinner("Generating blog content..."):
822
+ response = client.responses.create(
823
+ model=st.session_state.ai_model,
824
+ # OpenAI λž˜νΌμ— λ§žμΆ°μ„œ λŒ€λž΅μ μœΌλ‘œ μž¬κ΅¬μ„±ν•œ μ˜ˆμž…λ‹ˆλ‹€.
825
+ input=[
826
+ {
827
+ "role": "system",
828
+ "content": [
829
+ {
830
+ "type": "input_text",
831
+ "text": sys_prompt
832
+ }
833
+ ]
834
+ },
835
+ {
836
+ "role": "user",
837
+ "content": [
838
+ {
839
+ "type": "input_text",
840
+ "text": prompt
841
+ }
842
+ ]
843
+ }
844
+ # ν•„μš”ν•˜λ©΄ messages 전체λ₯Ό 넣을 μˆ˜λ„ μžˆμ§€λ§Œ,
845
+ # μ—¬κΈ°μ„œλŠ” prompt와 system prompt만 λ„£λŠ” μ˜ˆμ‹œ
846
+ ],
847
+ text={"format": {"type": "text"}},
848
+ temperature=1,
849
+ max_output_tokens=MAX_TOKENS,
850
+ top_p=1,
851
+ store=True
852
+ )
853
+
854
+ # μ˜ˆμ‹œμƒ response κ°μ²΄μ—μ„œ μ΅œμ’… ν…μŠ€νŠΈλ₯Ό κΊΌλ‚΄λŠ” 둜직
855
+ # μ‹€μ œ κ΅¬μ‘°λŠ” 라이브러리 κ΅¬ν˜„μ— 따라 λ‹€λ¦…λ‹ˆλ‹€
856
+ if "responses" in dir(response):
857
+ # κ°€μ •: response.responses[0].content[0].text ν˜•νƒœ
858
+ content_blocks = response.responses[0].content
859
+ answer = "\n".join(block["text"] for block in content_blocks if block["type"] == "output_text")
860
+ else:
861
+ answer = "Error: Unable to parse the OpenAI response."
862
 
863
+ # 슀트리밍이 μ•„λ‹ˆλ―€λ‘œ ν•œλ²ˆμ— answer νšλ“ ν›„ 좜λ ₯
864
+ placeholder.markdown(answer)
865
+
866
+ # 이미지 생성
867
  answer_entry_saved = False
868
  if st.session_state.generate_image:
869
  with st.spinner("Generating image..."):
 
878
  "image_caption": cap
879
  })
880
  answer_entry_saved = True
881
+
882
+ # Save the answer if not saved above
883
  if not answer_entry_saved:
884
  st.session_state.messages.append({"role": "assistant", "content": answer})
885
 
 
908
  except Exception as e:
909
  logging.error(f"Auto-save failed: {e}")
910
 
 
 
 
 
 
 
 
 
 
 
911
  except Exception as e:
912
  error_message = str(e)
913
  placeholder.error(f"An error occurred: {error_message}")
 
919
  ginigen_app()
920
 
921
  if __name__ == "__main__":
922
+ main()