820nam committed on
Commit
47c9735
·
verified ·
1 Parent(s): 5f6451e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -4
app.py CHANGED
@@ -2,11 +2,11 @@ import streamlit as st
2
  import requests
3
  import matplotlib.pyplot as plt
4
  from transformers import pipeline
5
- import openai
6
  import os
7
 
8
# OpenAI API key configuration.
# SECURITY: a live `sk-proj-...` secret key was previously hard-coded on this
# line and committed to version control — that key is compromised and must be
# revoked. Read the key from the environment instead; never embed secrets in
# source.
openai.api_key = os.getenv("OPENAI_API_KEY")
10
 
11
  # ๋„ค์ด๋ฒ„ ๋‰ด์Šค API๋ฅผ ํ†ตํ•ด ์‹ค์ œ ๋‰ด์Šค ๊ธฐ์‚ฌ ๊ฐ€์ ธ์˜ค๊ธฐ
12
  def fetch_naver_news(query, display=5):
@@ -41,13 +41,15 @@ def load_sentiment_model():
41
  # Generate an opposing-viewpoint article with GPT-4 (latest OpenAI API style)
42
def generate_article_gpt4(prompt):
    """Generate an opposing-viewpoint article with GPT-4.

    Args:
        prompt: Instruction/text sent to the model.

    Returns:
        The generated article text, or a string of the form
        "Error generating text: ..." if the API call fails.
    """
    try:
        # gpt-4 is a chat-only model: the legacy text-completion endpoint
        # (openai.Completion.create) rejects it. It must be called through
        # the chat endpoint with a `messages` list.
        response = openai.ChatCompletion.create(
            model="gpt-4",
            messages=[{"role": "user", "content": prompt}],
            max_tokens=512,
            temperature=0.7,
        )
        # Chat responses carry the text under message.content, not "text".
        return response["choices"][0]["message"]["content"].strip()
    except Exception as e:
        return f"Error generating text: {e}"
53
 
 
2
  import requests
3
  import matplotlib.pyplot as plt
4
  from transformers import pipeline
5
+ from openai import OpenAI
6
  import os
7
 
8
# OpenAI API key configuration — resolved from the environment only.
# SECURITY: the previous revision committed a live `sk-proj-...` secret as the
# os.getenv fallback; that key is compromised and must be revoked. Never embed
# secrets in source.
# BUG FIX: this revision imports only `from openai import OpenAI`, so the old
# `openai.api_key = ...` assignment would raise NameError at import time —
# keep the key in a plain module constant instead.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
10
 
11
  # ๋„ค์ด๋ฒ„ ๋‰ด์Šค API๋ฅผ ํ†ตํ•ด ์‹ค์ œ ๋‰ด์Šค ๊ธฐ์‚ฌ ๊ฐ€์ ธ์˜ค๊ธฐ
12
  def fetch_naver_news(query, display=5):
 
41
  # Generate an opposing-viewpoint article with GPT-4 (latest OpenAI API style)
42
def generate_article_gpt4(prompt):
    """Generate an opposing-viewpoint article with GPT-4 (openai>=1.0 client).

    Args:
        prompt: Instruction/text sent to the model.

    Returns:
        The generated article text, or a string of the form
        "Error generating text: ..." if the API call fails.
    """
    try:
        # Key comes from the environment — never hard-code secrets.
        client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
        # BUG FIX: gpt-4 is a chat-only model; `client.completions.create`
        # (the legacy text-completion endpoint) rejects it. Use the chat
        # completions endpoint with a `messages` list instead.
        response = client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": prompt}],
            max_tokens=512,
            temperature=0.7,
        )
        # Chat responses expose the text as .message.content, not .text.
        return response.choices[0].message.content.strip()
    except Exception as e:
        return f"Error generating text: {e}"
55