Update app.py
function about comments
app.py
CHANGED
@@ -53,33 +53,63 @@ def movie_evaluation_predict(sentence):
     data_x = sentence_convert_data(sentence)
     predict = sentiment_model.predict(data_x)
     predict_value = np.ravel(predict)
+    # 0: negative, 1: positive
     predict_answer = np.round(predict_value,0).item()
+    return predict_answer

-
-
-    if predict_answer == 0:
-        st.write("(Negative probability: %.2f) This is a negative movie review." % (1.0-predict_value))
-    elif predict_answer == 1:
-        st.write("(Positive probability: %.2f) This is a positive movie review." % predict_value)
-
-def scrape_content(url):
+def get_comments(news_url):
+    # Extract oid and aid from the article URL

+    url_parts = news_url.split("/")
+    oid = url_parts[-2]
+    aid = url_parts[-1]
+
+    # Build the API URL
+    api_url = "https://apis.naver.com/commentBox/cbox/web_naver_list_jsonp.json"
+    params = {
+        "ticket": "news",
+        "templateId": "default_society",
+        "pool": "cbox5",
+        "lang": "ko",
+        "country": "KR",
+        "objectId": f"news{oid},{aid}",
+        "pageSize": 100,
+        "indexSize": 10,
+        "page": 1,
+        "sort": "FAVORITE"  # 'NEW' (newest), 'FAVORITE' (most liked)
+    }
+
     headers = {
-
+        "User-Agent": "Mozilla/5.0",
+        "Referer": news_url
     }
-    # Request the web page
-    response = requests.get(url, headers=headers)
-    soup = BeautifulSoup(response.content, 'html.parser')

+    # Call the API and process the data
+    response = requests.get(api_url, params=params, headers=headers)
+    content = response.text.replace("_callback(", "").replace(");", "")
+    json_data = json.loads(content)
+
+    response = requests.get(news_url)
+    article_soup = BeautifulSoup(response.text, "html.parser")
+
+    # Extract the title
+    title = article_soup.select_one("#ct > div.media_end_head.go_trans > div.media_end_head_title > h2")
+    if title is None:
+        title = article_soup.select_one("#content > div.end_ct > div > h2")
+
     # Extract the article body
-
-    content
-
-    # Extract comments (example)
-    comments = soup.find_all('span', class_='u_cbox_contents')
-    comment_list = [comment.get_text() for comment in comments]
+    content = article_soup.select_one("#dic_area")
+    if content is None:
+        content = article_soup.select_one("#articeBody")

-    return
+    return title, content, processing_data(json_data['result']['commentList'])
+
+def processing_data(comments):
+    comment_list = []
+    for comment in comments:
+        comment_list.append(comment['contents'])
+    comment_listR = [x for x in comment_list if x]
+    return comment_listR


 def main():
@@ -88,19 +118,23 @@ def main():
     st.title("Web Content Scraper")

     # Get the URL input
-    url = st.text_input("
+    url = st.text_input("Enter a URL")

     if st.button("Start scraping"):
         if url:
-            content, comments =
+            title, content, comments = get_comments(url)

             # Display the results
+            st.subheader("Article title")
+            st.write(title)
+
             st.subheader("Article body")
             st.write(content)

             st.subheader("Comments")
             for comment in comments:
-
+                if movie_evaluation_predict(comment) == 1:
+                    st.write(comment)

 '''
 test = st.form('test')