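# Streamlit app: fetches recent Naver News articles for a search keyword, classifies each
# article's political leaning with politicalBiasBERT, asks GPT-4 to write an article from
# the opposing viewpoint, and visualizes the leaning distribution as a pie chart.
# Assumed dependencies: streamlit, requests, matplotlib, transformers (with torch), openai<1.0.
# Run with: streamlit run <this_file>.py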
import streamlit as st
import requests
import matplotlib.pyplot as plt
from transformers import pipeline
import openai
import os
# OpenAI API key setup
openai.api_key = os.getenv("OPENAI_API_KEY")  # read the key from an environment variable (or set it directly)
# Fetch real news articles via the Naver News Search API
def fetch_naver_news(query, display=5):
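    """Query the Naver News Search API and return up to `display` articles for `query`."""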
    client_id = "I_8koTJh3R5l4wLurQbG"  # Client ID issued by the Naver Developers portal
    client_secret = "W5oWYlAgur"  # Client Secret issued by the Naver Developers portal
url = "https://openapi.naver.com/v1/search/news.json"
headers = {
"X-Naver-Client-Id": client_id,
"X-Naver-Client-Secret": client_secret,
}
params = {
"query": query,
"display": display,
"start": 1,
"sort": "date", # ์ต์ ์์ผ๋ก ์ ๋ ฌ
}
response = requests.get(url, headers=headers, params=params)
if response.status_code == 200:
news_data = response.json()
        return news_data['items']  # return the list of news articles
else:
        st.error("Failed to load news data.")
return []
# Load the political-leaning classification model
def load_sentiment_model():
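    # Builds a Hugging Face text-classification pipeline around politicalBiasBERT;
    # the model weights are downloaded from the Hub on the first call.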
classifier = pipeline("text-classification", model="bucketresearch/politicalBiasBERT")
return classifier
# Generate an opposing-viewpoint article with GPT-4 via the chat completions API
def generate_article_gpt4(prompt):
    try:
        response = openai.ChatCompletion.create(
            model="gpt-4",  # GPT-4 is a chat model, so it must go through the chat completions endpoint
            messages=[{"role": "user", "content": prompt}],
            max_tokens=512,
            temperature=0.7,
        )
        return response["choices"][0]["message"]["content"].strip()  # GPT's response text
    except Exception as e:
        return f"Error generating text: {e}"
# Analyze the political leaning of a piece of text
def analyze_article_sentiment(text, classifier):
    result = classifier(text[:512])  # truncate overly long text before classification
label = result[0]["label"]
score = result[0]["score"]
    # Map the labels returned by the model to "progressive", "conservative", and "neutral"
    if label == "LEFT":
        return "progressive", score
    elif label == "RIGHT":
        return "conservative", score
    else:
        return "neutral", score
# Compare political viewpoints across articles and generate opposing-viewpoint pieces
def analyze_news_political_viewpoint(query):
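    # Pipeline: fetch articles -> classify each one's leaning -> ask GPT-4 for an
    # opposing-viewpoint article -> tally the leaning distribution.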
    # Fetch the news data
news_items = fetch_naver_news(query)
if not news_items:
return [], {}
classifier = load_sentiment_model()
results = []
    sentiment_counts = {"progressive": 0, "conservative": 0, "neutral": 0}  # initialized to match the mapped labels
for item in news_items:
title = item["title"]
description = item["description"]
combined_text = f"{title}. {description}"
        # Classify the article's political leaning
sentiment, score = analyze_article_sentiment(combined_text, classifier)
        sentiment_counts[sentiment] += 1  # increment the count for the mapped label
        # Generate an opposing-viewpoint article
        opposite_perspective = "conservative" if sentiment == "progressive" else "progressive"
        prompt = f"Based on the following article, write a news article from a {opposite_perspective} point of view:\n{combined_text}"
opposite_article = generate_article_gpt4(prompt)
        results.append({
            "Title": title,
            "Original Article": description,
            "Leaning": sentiment,
            "Leaning Score": score,
            "Opposing Viewpoint Article": opposite_article
        })
return results, sentiment_counts
# Visualize the leaning distribution as a pie chart
def visualize_sentiment_distribution(sentiment_counts):
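    # Colors follow the dict insertion order: progressive (blue), conservative (red), neutral (gray).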
fig, ax = plt.subplots()
labels = list(sentiment_counts.keys())
sizes = list(sentiment_counts.values())
ax.pie(sizes, labels=labels, autopct='%1.1f%%', startangle=90, colors=["blue", "red", "gray"])
ax.axis("equal")
st.pyplot(fig)
# Streamlit application
st.title("Political Viewpoint Comparison Tool")
st.markdown("Classifies the political leaning of news articles and generates opposing-viewpoint articles for comparison.")
query = st.text_input("Enter a search keyword", value="정치")  # default keyword: "politics" in Korean, since Naver News is a Korean-language source
if st.button("Start Analysis"):
    with st.spinner("Analyzing..."):
        analysis_results, sentiment_counts = analyze_news_political_viewpoint(query)
    if analysis_results:
        st.success("News analysis complete.")
        # Visualize the leaning distribution
        st.subheader("Leaning Distribution")
        visualize_sentiment_distribution(sentiment_counts)
        # Show the detailed per-article results
        st.subheader("Detailed Analysis Results")
        for result in analysis_results:
            st.write(f"#### Title: {result['Title']}")
            st.write(f"- **Original Article**: {result['Original Article']}")
            st.write(f"- **Leaning**: {result['Leaning']} (score: {result['Leaning Score']:.2f})")
            st.write(f"- **Opposing Viewpoint Article**: {result['Opposing Viewpoint Article']}")
            st.write("---")
    else:
        st.error("No news data was analyzed.")