Update app.py
app.py CHANGED
@@ -5,6 +5,11 @@ import seaborn as sns
5 | from transformers import pipeline
6 | import openai
7 | import os
8 |
9 | # Streamlit page config must be called first
10 | st.set_page_config(page_title="Political Viewpoint Analysis", page_icon="📰", layout="wide")
@@ -54,10 +59,52 @@ def fetch_naver_news(query, display=5):
54 |         st.error("Failed to fetch news data.")
55 |         return []
56 |
57 | - #
58 | - def load_sentiment_model():
59 | -
60 | -
61 |
62 | # Generate an opposing-viewpoint article with GPT-4
63 | def generate_article_gpt4(prompt):
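The four removed lines above (old 57-60) are truncated in this rendering. Judging from the `transformers` pipeline import and the removed call `classifier = load_sentiment_model()` in the next hunk, they defined the transformers-based classifier loader that the new scikit-learn model replaces. A minimal sketch of what such a loader typically looks like follows; the decorator and the checkpoint name are assumptions, not taken from the diff:

# Hypothetical reconstruction of the removed helper; the checkpoint name is a placeholder.
@st.cache_resource
def load_sentiment_model():
    # A text-classification pipeline that emits LEFT / RIGHT / NEUTRAL labels.
    return pipeline("text-classification", model="<political-bias-checkpoint>")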
@@ -76,28 +123,13 @@ def generate_article_gpt4(prompt):
76 |     except Exception as e:
77 |         return f"Error generating text: {e}"
78 |
79 | - # Political-bias analysis
80 | - def analyze_article_sentiment(text, classifier):
81 | -     result = classifier(text[:512])  # truncate overly long text before classifying
82 | -     label = result[0]["label"]
83 | -     score = result[0]["score"]
84 | -
85 | -     # Map the labels returned by the model to "Progressive", "Conservative", "Neutral"
86 | -     if label == "LEFT":
87 | -         return "Progressive", score
88 | -     elif label == "RIGHT":
89 | -         return "Conservative", score
90 | -     else:
91 | -         return "Neutral", score
92 | -
93 | # Compare political viewpoints and generate opposing-viewpoint articles
94 | - def analyze_news_political_viewpoint(query):
95 |     # Fetch the news data
96 |     news_items = fetch_naver_news(query)
97 |     if not news_items:
98 |         return [], {}
99 |
100 | -     classifier = load_sentiment_model()
101 |     results = []
102 |     sentiment_counts = {"Progressive": 0, "Conservative": 0, "Neutral": 0}  # initialize to match the mapped labels
103 |
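The hunk above keeps the call news_items = fetch_naver_news(query); the helper itself is unchanged and its body is not part of this diff. It queries the Naver news search Open API, so it presumably looks roughly like the sketch below (the endpoint and header names follow the Naver Open API; the environment-variable names and error handling are assumptions):

import requests

def fetch_naver_news(query, display=5):
    # Naver Open API news search endpoint; credentials must come from the app's own config.
    url = "https://openapi.naver.com/v1/search/news.json"
    headers = {
        "X-Naver-Client-Id": os.environ.get("NAVER_CLIENT_ID", ""),
        "X-Naver-Client-Secret": os.environ.get("NAVER_CLIENT_SECRET", ""),
    }
    resp = requests.get(url, headers=headers, params={"query": query, "display": display})
    if resp.status_code == 200:
        return resp.json().get("items", [])
    st.error("Failed to fetch news data.")
    return []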
@@ -107,8 +139,8 @@ def analyze_news_political_viewpoint(query):
107 |         link = item["link"]  # get the article link
108 |         combined_text = f"{title}. {description}"
109 |
110 | -         #
111 | -         sentiment, score = analyze_article_sentiment(combined_text, classifier)
112 |         sentiment_counts[sentiment] += 1  # count under the mapped key
113 |
114 |         # Generate the opposing-viewpoint article
@@ -120,7 +152,6 @@ def analyze_news_political_viewpoint(query):
120 |             "Title": title,
121 |             "Original Article": description,
122 |             "Bias": sentiment,
123 | -             "Bias Score": score,
124 |             "Opposing Viewpoint Article": opposite_article,
125 |             "News Link": link  # include the article link
126 |         })
@@ -146,29 +177,34 @@ def visualize_sentiment_distribution(sentiment_counts):
146 | st.title("📰 Political Viewpoint Comparison Tool")
147 | st.markdown("Analyzes the political leaning of news articles and generates opposing-viewpoint articles for comparison.")
148 |
149 | # Get the search keyword from the user
150 | query = st.text_input("Enter a search keyword", value="politics")
151 |
152 | # Start-analysis button
153 | if st.button("🔍 Start Analysis"):
154 |     with st.spinner("Analyzing..."):
155 | -         analysis_results, sentiment_counts = analyze_news_political_viewpoint(query)
156 |
157 |     if analysis_results:
158 |         st.success("News analysis is complete.")
159 |
160 | -         #
161 | -         st.subheader("📊 Bias Distribution")
162 | -         visualize_sentiment_distribution(sentiment_counts)
163 | -
164 | -         # Print the detailed analysis results
165 | -         st.subheader("📄 Detailed Analysis Results")
166 |         for result in analysis_results:
167 | -             st.
168 | -             st.write(f"
169 | -             st.write(f"
170 | -             st.write(f"
171 | -             st.write(f"
172 | -             st.
173 |     else:
174 | -         st.
5 | from transformers import pipeline
6 | import openai
7 | import os
8 | + from sklearn.feature_extraction.text import TfidfVectorizer
9 | + from sklearn.linear_model import LogisticRegression
10 | + from sklearn.model_selection import train_test_split
11 | + from sklearn.metrics import accuracy_score
12 | + import joblib
13 |
14 | # Streamlit page config must be called first
15 | st.set_page_config(page_title="Political Viewpoint Analysis", page_icon="📰", layout="wide")
59 |         st.error("Failed to fetch news data.")
60 |         return []
61 |
62 | + # Load and train the machine-learning model
63 | + def train_ml_model():
64 | +     # A toy sample dataset is used for training here;
65 | +     # training on real data is needed in practice.
66 | +     data = [
67 | +         ("Progressive government policies should be strengthened", "LEFT"),
68 | +         ("Conservative economic policies are needed", "RIGHT"),
69 | +         ("The situation is assessed from a neutral standpoint", "NEUTRAL")
70 | +     ]
71 | +     texts, labels = zip(*data)
72 | +
73 | +     # TF-IDF vectorization
74 | +     vectorizer = TfidfVectorizer(max_features=1000)
75 | +     X = vectorizer.fit_transform(texts)
76 | +     y = labels
77 | +
78 | +     # Split into training and test data
79 | +     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
80 | +
81 | +     # Train a logistic-regression model
82 | +     model = LogisticRegression()
83 | +     model.fit(X_train, y_train)
84 | +
85 | +     # Evaluate model performance
86 | +     y_pred = model.predict(X_test)
87 | +     accuracy = accuracy_score(y_test, y_pred)
88 | +     st.write(f"Model accuracy: {accuracy:.2f}")
89 | +
90 | +     # Save the model
91 | +     joblib.dump(model, 'political_bias_model.pkl')
92 | +     joblib.dump(vectorizer, 'tfidf_vectorizer.pkl')
93 | +
94 | +     return model, vectorizer
95 | +
96 | + # Classify political bias with the loaded machine-learning model
97 | + def analyze_article_sentiment_ml(text, model, vectorizer):
98 | +     X = vectorizer.transform([text])
99 | +     prediction = model.predict(X)[0]
100 | +
101 | +     # Return the label for the predicted bias
102 | +     if prediction == "LEFT":
103 | +         return "Progressive"
104 | +     elif prediction == "RIGHT":
105 | +         return "Conservative"
106 | +     else:
107 | +         return "Neutral"
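Two things are worth noting about this replacement classifier. With only three sample rows, train_test_split(..., test_size=0.2) holds out a single sentence whose class the model never saw during training, so the printed accuracy comes out as 0.00 until real training data is supplied (the comment inside train_ml_model already flags this). Also, model.predict returns only a label, unlike the removed transformers pipeline that returned a confidence score, which lines up with the "Bias Score" field being dropped from the results dictionary in the earlier hunk. If a confidence value is still wanted, LogisticRegression exposes class probabilities; a hedged sketch of such a variant, not part of this commit:

# Hypothetical variant: return a label plus a confidence score via predict_proba.
def analyze_article_sentiment_ml_with_score(text, model, vectorizer):
    X = vectorizer.transform([text])
    probs = model.predict_proba(X)[0]
    best = probs.argmax()
    label = model.classes_[best]
    mapping = {"LEFT": "Progressive", "RIGHT": "Conservative", "NEUTRAL": "Neutral"}
    return mapping.get(label, "Neutral"), float(probs[best])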
108 |
109 | # Generate an opposing-viewpoint article with GPT-4
110 | def generate_article_gpt4(prompt):
123 |     except Exception as e:
124 |         return f"Error generating text: {e}"
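Only the closing except clause of generate_article_gpt4 falls inside the diff context; the body (new lines 111-122) is unchanged and not shown. Given the plain `import openai` at the top of the file, it presumably wraps a chat-completion call roughly like the following sketch, written against the pre-1.0 openai SDK; the model name and message shape are assumptions:

# Hypothetical sketch of the unchanged body (pre-1.0 openai SDK style).
def generate_article_gpt4(prompt):
    try:
        response = openai.ChatCompletion.create(
            model="gpt-4",
            messages=[{"role": "user", "content": prompt}],
        )
        return response["choices"][0]["message"]["content"]
    except Exception as e:
        return f"Error generating text: {e}"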
125 |
126 | # Compare political viewpoints and generate opposing-viewpoint articles
127 | + def analyze_news_political_viewpoint(query, model, vectorizer):
128 |     # Fetch the news data
129 |     news_items = fetch_naver_news(query)
130 |     if not news_items:
131 |         return [], {}
132 |
133 |     results = []
134 |     sentiment_counts = {"Progressive": 0, "Conservative": 0, "Neutral": 0}  # initialize to match the mapped labels
135 |
139 |         link = item["link"]  # get the article link
140 |         combined_text = f"{title}. {description}"
141 |
142 | +         # Political-bias analysis with the machine-learning model
143 | +         sentiment = analyze_article_sentiment_ml(combined_text, model, vectorizer)
144 |         sentiment_counts[sentiment] += 1  # count under the mapped key
145 |
146 |         # Generate the opposing-viewpoint article
152 |             "Title": title,
153 |             "Original Article": description,
154 |             "Bias": sentiment,
155 |             "Opposing Viewpoint Article": opposite_article,
156 |             "News Link": link  # include the article link
157 |         })
177 | st.title("📰 Political Viewpoint Comparison Tool")
178 | st.markdown("Analyzes the political leaning of news articles and generates opposing-viewpoint articles for comparison.")
179 |
180 | + # Load the machine-learning model
181 | + if not os.path.exists('political_bias_model.pkl'):
182 | +     model, vectorizer = train_ml_model()
183 | + else:
184 | +     model = joblib.load('political_bias_model.pkl')
185 | +     vectorizer = joblib.load('tfidf_vectorizer.pkl')
186 | +
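One detail of the loading block above: only political_bias_model.pkl is checked, while tfidf_vectorizer.pkl is loaded unconditionally in the else branch, so a missing vectorizer file would raise at startup. A slightly more defensive variant, shown as a sketch rather than as part of this commit, retrains whenever either artifact is absent:

# Sketch only: retrain if either saved artifact is missing.
if os.path.exists('political_bias_model.pkl') and os.path.exists('tfidf_vectorizer.pkl'):
    model = joblib.load('political_bias_model.pkl')
    vectorizer = joblib.load('tfidf_vectorizer.pkl')
else:
    model, vectorizer = train_ml_model()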
187 | # Get the search keyword from the user
188 | query = st.text_input("Enter a search keyword", value="politics")
189 |
190 | # Start-analysis button
191 | if st.button("🔍 Start Analysis"):
192 |     with st.spinner("Analyzing..."):
193 | +         analysis_results, sentiment_counts = analyze_news_political_viewpoint(query, model, vectorizer)
194 |
195 |     if analysis_results:
196 |         st.success("News analysis is complete.")
197 |
198 | +         # Display the list of news articles
199 |         for result in analysis_results:
200 | +             st.subheader(result["Title"])
201 | +             st.write(f"Bias: {result['Bias']}")
202 | +             st.write(f"Article: {result['Original Article']}")
203 | +             st.write(f"[View the original article]({result['News Link']})")
204 | +             st.write(f"Opposing viewpoint article: {result['Opposing Viewpoint Article']}")
205 | +             st.markdown("---")
206 | +
207 | +         # Visualize the bias distribution
208 | +         visualize_sentiment_distribution(sentiment_counts)
209 |     else:
210 | +         st.warning("No news articles were found.")
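Since train_ml_model persists both artifacts with joblib, the classifier added in this commit can also be exercised outside Streamlit for a quick check. A minimal sketch, assuming train_ml_model() has already run once and written the two files named above; the sample sentence is illustrative:

# Standalone check of the persisted model and vectorizer.
import joblib

model = joblib.load('political_bias_model.pkl')
vectorizer = joblib.load('tfidf_vectorizer.pkl')

sample = "Conservative economic policies are needed"
features = vectorizer.transform([sample])
print(model.predict(features)[0])  # prints one of LEFT / RIGHT / NEUTRAL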