haotle committed
Commit e3473f2 · verified · 1 Parent(s): 8a2dc42

Update pages/2 Topic Modeling.py

Files changed (1):
  1. pages/2 Topic Modeling.py +674 -674
pages/2 Topic Modeling.py CHANGED
@@ -545,7 +545,7 @@ (the page marks every one of the 674 lines as changed, but the old and new text are identical apart from the checkpoint id below)
                         elif llm_model == "OpenAI/gpt-oss":
                             gen = pipeline("text-generation",
-                                model = "openai/gpt-oss-20b",
+                                model = "unsloth/gpt-oss-20b-BF16",
                                 torch_dtype = "auto",
                                 device_map = "auto",
                             )
                             clientmod = TextGeneration(gen)
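
For context, a minimal sketch (not part of the commit): the swapped string is only the checkpoint id that transformers.pipeline downloads; BERTopic sees the finished generator through its TextGeneration wrapper, so the labelling model can be changed without touching anything else on the page. The new id appears to be Unsloth's BF16 upload of openai/gpt-oss-20b. The prompt below is hypothetical; TextGeneration falls back to its default prompt when none is given.

    from transformers import pipeline
    from bertopic.representation import TextGeneration

    # any text-generation checkpoint id works here; this commit swaps in
    # "unsloth/gpt-oss-20b-BF16" in place of "openai/gpt-oss-20b"
    gen = pipeline("text-generation", model="unsloth/gpt-oss-20b-BF16",
                   torch_dtype="auto", device_map="auto")

    # hypothetical prompt; BERTopic fills [KEYWORDS] with each topic's top terms
    labeller = TextGeneration(gen, prompt="Keywords: [KEYWORDS]. Short topic label:")
    # the page then passes {"KeyBERT": ..., "MMR": ..., "test": labeller} as representation_model

pages/2 Topic Modeling.py, full file at this commit: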
 
#import module
import streamlit as st
import streamlit.components.v1 as components
import pandas as pd
import numpy as np
import re
import string
import nltk
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
nltk.download('stopwords')
from nltk.corpus import stopwords
import gensim
import gensim.corpora as corpora
from gensim.corpora import Dictionary
from gensim.models.coherencemodel import CoherenceModel
from gensim.models.ldamodel import LdaModel
from gensim.models import Phrases
from gensim.models.phrases import Phraser
from pprint import pprint
import pickle
import pyLDAvis
import pyLDAvis.gensim_models as gensimvis
from io import StringIO
from ipywidgets.embed import embed_minimal_html
from nltk.stem.snowball import SnowballStemmer
from bertopic import BERTopic
from bertopic.representation import KeyBERTInspired, MaximalMarginalRelevance, OpenAI, TextGeneration
import plotly.express as px
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import CountVectorizer
import bitermplus as btm
import tmplot as tmp
import tomotopy
import sys
import spacy
import en_core_web_sm
import pipeline
from html2image import Html2Image
from umap import UMAP
import os
import time
import json
from tools import sourceformat as sf
import datamapplot
from sentence_transformers import SentenceTransformer
import openai
from transformers import pipeline

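# (note: the bare `import pipeline` earlier in this block is shadowed by
#  `from transformers import pipeline`, which is what the labelling code calls)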
#===config===
st.set_page_config(
    page_title="Coconut",
    page_icon="🥥",
    layout="wide",
    initial_sidebar_state="collapsed"
)

hide_streamlit_style = """
<style>
#MainMenu
{visibility: hidden;}
footer {visibility: hidden;}
[data-testid="collapsedControl"] {display: none}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)

with st.popover("🔗 Menu"):
    st.page_link("https://www.coconut-libtool.com/", label="Home", icon="🏠")
    st.page_link("pages/1 Scattertext.py", label="Scattertext", icon="1️⃣")
    st.page_link("pages/2 Topic Modeling.py", label="Topic Modeling", icon="2️⃣")
    st.page_link("pages/3 Bidirected Network.py", label="Bidirected Network", icon="3️⃣")
    st.page_link("pages/4 Sunburst.py", label="Sunburst", icon="4️⃣")
    st.page_link("pages/5 Burst Detection.py", label="Burst Detection", icon="5️⃣")
    st.page_link("pages/6 Keywords Stem.py", label="Keywords Stem", icon="6️⃣")
    st.page_link("pages/7 Sentiment Analysis.py", label="Sentiment Analysis", icon="7️⃣")

st.header("Topic Modeling", anchor=False)
st.subheader('Put your file here...', anchor=False)

#========unique id========
@st.cache_resource(ttl=3600)
def create_list():
    l = [1, 2, 3]
    return l

l = create_list()
first_list_value = l[0]
l[0] = first_list_value + 1
uID = str(l[0])

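# (note: create_list is cached as a resource, so the same list object survives
#  reruns; l[0] increments on every rerun and uID is prepended to the uploaded
#  file's name below, giving the data caches a fresh key)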
@st.cache_data(ttl=3600)
def get_ext(uploaded_file):
    extype = uID+uploaded_file.name
    return extype

#===clear cache===

def reset_biterm():
    try:
        biterm_map.clear()
        biterm_bar.clear()
    except NameError:
        biterm_topic.clear()

def reset_all():
    st.cache_data.clear()

#===avoiding deadlock===
os.environ["TOKENIZERS_PARALLELISM"] = "false"

#===upload file===
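# (the converters below take the uID-prefixed name, or the file object, purely
#  as a cache key and read the actual bytes from the global `uploaded_file`)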
@st.cache_data(ttl=3600)
def upload(file):
    papers = pd.read_csv(uploaded_file)
    if "About the data" in papers.columns[0]:
        papers = sf.dim(papers)
        col_dict = {'MeSH terms': 'Keywords',
                    'PubYear': 'Year',
                    'Times cited': 'Cited by',
                    'Publication Type': 'Document Type'
                    }
        papers.rename(columns=col_dict, inplace=True)

    return papers

@st.cache_data(ttl=3600)
def conv_txt(extype):
    if "PMID" in uploaded_file.read().decode():
        uploaded_file.seek(0)
        papers = sf.medline(uploaded_file)
        print(papers)
        return papers
    col_dict = {'TI': 'Title',
                'SO': 'Source title',
                'DE': 'Author Keywords',
                'DT': 'Document Type',
                'AB': 'Abstract',
                'TC': 'Cited by',
                'PY': 'Year',
                'ID': 'Keywords Plus',
                'rights_date_used': 'Year'}
    uploaded_file.seek(0)
    papers = pd.read_csv(uploaded_file, sep='\t')
    if "htid" in papers.columns:
        papers = sf.htrc(papers)
    papers.rename(columns=col_dict, inplace=True)
    print(papers)
    return papers


@st.cache_data(ttl=3600)
def conv_json(extype):
    col_dict = {'title': 'title',
                'rights_date_used': 'Year',
                }

    data = json.load(uploaded_file)
    hathifile = data['gathers']
    keywords = pd.DataFrame.from_records(hathifile)

    keywords = sf.htrc(keywords)
    keywords.rename(columns=col_dict, inplace=True)
    return keywords

@st.cache_resource(ttl=3600)
def conv_pub(extype):
    if get_ext(extype).endswith('.tar.gz'):
        bytedata = extype.read()
        keywords = sf.readPub(bytedata)
    elif get_ext(extype).endswith('.xml'):
        bytedata = extype.read()
        keywords = sf.readxml(bytedata)
    return keywords

#===Read data===
uploaded_file = st.file_uploader('', type=['csv', 'txt', 'json', 'tar.gz', 'xml'], on_change=reset_all)

if uploaded_file is not None:
    try:
        extype = get_ext(uploaded_file)

        if extype.endswith('.csv'):
            papers = upload(extype)
        elif extype.endswith('.txt'):
            papers = conv_txt(extype)

        elif extype.endswith('.json'):
            papers = conv_json(extype)
        elif extype.endswith('.tar.gz') or extype.endswith('.xml'):
            papers = conv_pub(uploaded_file)

        coldf = sorted(papers.select_dtypes(include=['object']).columns.tolist())
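        # (coldf is collected but not used below; the column picker is fixed
        #  to Title/Abstract)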

        c1, c2, c3 = st.columns([3,3,4])
        method = c1.selectbox(
            'Choose method',
            ('Choose...', 'pyLDA', 'Biterm', 'BERTopic'))
        ColCho = c2.selectbox('Choose column', (["Title","Abstract"]))
        num_cho = c3.number_input('Choose number of topics', min_value=2, max_value=30, value=5)

        d1, d2 = st.columns([3,7])
        xgram = d1.selectbox("N-grams", ("1", "2", "3"))
        xgram = int(xgram)
        words_to_remove = d2.text_input("Remove specific words. Separate words by semicolons (;)")

        rem_copyright = d1.toggle('Remove copyright statement', value=True)
        rem_punc = d2.toggle('Remove punctuation', value=True)

        #===advance settings===
        with st.expander("🧮 Show advance settings"):
            t1, t2, t3 = st.columns([3,3,4])
            if method == 'pyLDA':
                py_random_state = t1.number_input('Random state', min_value=0, max_value=None, step=1)
                py_chunksize = t2.number_input('Chunk size', value=100, min_value=10, max_value=None, step=1)
                opt_threshold = t3.number_input('Threshold', value=100, min_value=1, max_value=None, step=1)

            elif method == 'Biterm':
                btm_seed = t1.number_input('Random state seed', value=100, min_value=1, max_value=None, step=1)
                btm_iterations = t2.number_input('Iterations number', value=20, min_value=2, max_value=None, step=1)
                opt_threshold = t3.number_input('Threshold', value=100, min_value=1, max_value=None, step=1)

            elif method == 'BERTopic':
                u1, u2 = st.columns([5,5])

                bert_top_n_words = u1.number_input('top_n_words', value=5, min_value=5, max_value=25, step=1)
                bert_random_state = u2.number_input('random_state', value=42, min_value=1, max_value=None, step=1)
                bert_n_components = u1.number_input('n_components', value=5, min_value=1, max_value=None, step=1)
                bert_n_neighbors = u2.number_input('n_neighbors', value=15, min_value=1, max_value=None, step=1)
                bert_embedding_model = st.radio(
                    "embedding_model",
                    ["all-MiniLM-L6-v2", "paraphrase-multilingual-MiniLM-L12-v2", "en_core_web_sm"], index=0, horizontal=True)

                fine_tuning = st.toggle("Use Fine-tuning")
                if fine_tuning:
                    topic_labelling = st.toggle("Automatic topic labelling")
                    if topic_labelling:
                        llm_model = st.selectbox("Model", ["OpenAI/gpt-4o","Google/Flan-t5","OpenAI/gpt-oss"])
                        if llm_model == "OpenAI/gpt-4o":
                            api_key = st.text_input("API Key")

            else:
                st.write('Please choose your preferred method')

        #===clean csv===
        @st.cache_data(ttl=3600, show_spinner=False)
        def clean_csv(extype):
            paper = papers.dropna(subset=[ColCho])

            #===mapping===
            paper['Abstract_pre'] = paper[ColCho].map(lambda x: x.lower())
            if rem_punc:
                paper['Abstract_pre'] = paper['Abstract_pre'].map(
                    lambda x: re.sub(f"[{re.escape(string.punctuation)}]", " ", x)
                ).map(lambda x: re.sub(r"\s+", " ", x).strip())
                paper['Abstract_pre'] = paper['Abstract_pre'].str.replace('[\u2018\u2019\u201c\u201d]', '', regex=True)
            if rem_copyright:
                paper['Abstract_pre'] = paper['Abstract_pre'].map(lambda x: re.sub('©.*', '', x))

            #===stopword removal===
            stop = stopwords.words('english')
            paper['Abstract_stop'] = paper['Abstract_pre'].apply(lambda x: ' '.join([word for word in x.split() if word not in (stop)]))

            #===lemmatize===
            lemmatizer = WordNetLemmatizer()

            @st.cache_resource(ttl=3600)
            def lemmatize_words(text):
                words = text.split()
                words = [lemmatizer.lemmatize(word) for word in words]
                return ' '.join(words)
            paper['Abstract_lem'] = paper['Abstract_stop'].apply(lemmatize_words)

            words_rmv = [word.strip() for word in words_to_remove.split(";")]
            remove_dict = {word: None for word in words_rmv}

            @st.cache_resource(ttl=3600)
            def remove_words(text):
                words = text.split()
                cleaned_words = [word for word in words if word not in remove_dict]
                return ' '.join(cleaned_words)
            paper['Abstract_lem'] = paper['Abstract_lem'].map(remove_words)

            topic_abs = paper.Abstract_lem.values.tolist()
            return topic_abs, paper

        topic_abs, paper = clean_csv(extype)

        if st.button("Submit", on_click=reset_all):
            num_topic = num_cho
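        # (num_topic exists only after Submit is clicked; the NameError handlers
        #  in each branch below turn the undefined name into a "click Submit" hint)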

        if method == 'BERTopic':
            st.info('BERTopic is an expensive process when dealing with a large volume of text with our existing resources. Please kindly wait until the visualization appears.', icon="ℹ️")

        #===topic===
        if method == 'Choose...':
            st.write('')

        elif method == 'pyLDA':
            tab1, tab2, tab3, tab4 = st.tabs(["📈 Generate visualization", "📃 Reference", "📓 Recommended Reading", "⬇️ Download Help"])

            with tab1:
                #===visualization===
                @st.cache_data(ttl=3600, show_spinner=False)
                def pylda(extype):
                    topic_abs_LDA = [t.split(' ') for t in topic_abs]

                    bigram = Phrases(topic_abs_LDA, min_count=xgram, threshold=opt_threshold)
                    trigram = Phrases(bigram[topic_abs_LDA], threshold=opt_threshold)
                    bigram_mod = Phraser(bigram)
                    trigram_mod = Phraser(trigram)

                    topic_abs_LDA = [trigram_mod[bigram_mod[doc]] for doc in topic_abs_LDA]

                    id2word = Dictionary(topic_abs_LDA)
                    corpus = [id2word.doc2bow(text) for text in topic_abs_LDA]
                    #===LDA===
                    lda_model = LdaModel(corpus=corpus,
                                         id2word=id2word,
                                         num_topics=num_topic,
                                         random_state=py_random_state,
                                         chunksize=py_chunksize,
                                         alpha='auto',
                                         per_word_topics=False)
                    pprint(lda_model.print_topics())
                    doc_lda = lda_model[corpus]
                    topics = lda_model.show_topics(num_words=30, formatted=False)

                    #===visualization===
                    coherence_model_lda = CoherenceModel(model=lda_model, texts=topic_abs_LDA, dictionary=id2word, coherence='c_v')
                    coherence_lda = coherence_model_lda.get_coherence()
                    vis = pyLDAvis.gensim_models.prepare(lda_model, corpus, id2word)
                    py_lda_vis_html = pyLDAvis.prepared_data_to_html(vis)
                    return py_lda_vis_html, coherence_lda, vis, topics

                with st.spinner('Performing computations. Please wait ...'):
                    try:
                        py_lda_vis_html, coherence_lda, vis, topics = pylda(extype)
                        st.write('Coherence score: ', coherence_lda)
                        components.html(py_lda_vis_html, width=1500, height=800)
                        st.markdown('Copyright (c) 2015, Ben Mabey. https://github.com/bmabey/pyLDAvis')

                        @st.cache_data(ttl=3600, show_spinner=False)
                        def img_lda(vis):
                            pyLDAvis.save_html(vis, 'output.html')
                            hti = Html2Image()
                            hti.browser.flags = ['--default-background-color=ffffff', '--hide-scrollbars']
                            hti.browser.use_new_headless = None
                            css = "body {background: white;}"
                            hti.screenshot(
                                other_file='output.html', css_str=css, size=(1500, 800),
                                save_as='ldavis_img.png'
                            )

                        img_lda(vis)

                        d1, d2 = st.columns(2)
                        with open("ldavis_img.png", "rb") as file:
                            btn = d1.download_button(
                                label="Download image",
                                data=file,
                                file_name="ldavis_img.png",
                                mime="image/png"
                            )

                        #===download results===#
                        resultf = pd.DataFrame(topics)
                        #formatting
                        resultf = resultf.transpose()
                        resultf = resultf.drop([0])
                        resultf = resultf.explode(list(range(len(resultf.columns))), ignore_index=False)

                        resultcsv = resultf.to_csv().encode("utf-8")
                        d2.download_button(
                            label="Download Results",
                            data=resultcsv,
                            file_name="results.csv",
                            mime="text/csv",
                            on_click="ignore")

                    except NameError as f:
                        st.warning('🖱️ Please click Submit')

            with tab2:
                st.markdown('**Sievert, C., & Shirley, K. (2014). LDAvis: A method for visualizing and interpreting topics. Proceedings of the Workshop on Interactive Language Learning, Visualization, and Interfaces.** https://doi.org/10.3115/v1/w14-3110')

            with tab3:
                st.markdown('**Chen, X., & Wang, H. (2019, January). Automated chat transcript analysis using topic modeling for library reference services. Proceedings of the Association for Information Science and Technology, 56(1), 368–371.** https://doi.org/10.1002/pra2.31')
                st.markdown('**Joo, S., Ingram, E., & Cahill, M. (2021, December 15). Exploring Topics and Genres in Storytime Books: A Text Mining Approach. Evidence Based Library and Information Practice, 16(4), 41–62.** https://doi.org/10.18438/eblip29963')
                st.markdown('**Lamba, M., & Madhusudhan, M. (2021, July 31). Topic Modeling. Text Mining for Information Professionals, 105–137.** https://doi.org/10.1007/978-3-030-85085-2_4')
                st.markdown('**Lamba, M., & Madhusudhan, M. (2019, June 7). Mapping of topics in DESIDOC Journal of Library and Information Technology, India: a study. Scientometrics, 120(2), 477–505.** https://doi.org/10.1007/s11192-019-03137-5')

            with tab4:
                st.subheader(':blue[pyLDA]', anchor=False)
                st.button('Download image')
                st.text("Click Download Image button.")
                st.divider()
                st.subheader(':blue[Downloading CSV Results]', anchor=False)
                st.button("Download Results")
                st.text("Click Download results button at bottom of page")

        #===Biterm===
        elif method == 'Biterm':

            #===optimize Biterm===
            @st.cache_data(ttl=3600, show_spinner=False)
            def biterm_topic(extype):
                tokenized_abs = [t.split(' ') for t in topic_abs]

                bigram = Phrases(tokenized_abs, min_count=xgram, threshold=opt_threshold)
                trigram = Phrases(bigram[tokenized_abs], threshold=opt_threshold)
                bigram_mod = Phraser(bigram)
                trigram_mod = Phraser(trigram)

                topic_abs_ngram = [trigram_mod[bigram_mod[doc]] for doc in tokenized_abs]

                topic_abs_str = [' '.join(doc) for doc in topic_abs_ngram]

                X, vocabulary, vocab_dict = btm.get_words_freqs(topic_abs_str)
                tf = np.array(X.sum(axis=0)).ravel()
                # vectorize the same n-grammed strings the vocabulary was built from
                docs_vec = btm.get_vectorized_docs(topic_abs_str, vocabulary)
                docs_lens = list(map(len, docs_vec))
                biterms = btm.get_biterms(docs_vec)

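                # (alpha is fixed at 50/8, the classic 50/T heuristic evaluated at
                #  T=8, rather than 50/num_topic)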
                model = btm.BTM(X, vocabulary, seed=btm_seed, T=num_topic, M=20, alpha=50/8, beta=0.01)
                model.fit(biterms, iterations=btm_iterations)

                p_zd = model.transform(docs_vec)
                coherence = model.coherence_
                phi = tmp.get_phi(model)
                topics_coords = tmp.prepare_coords(model)
                totaltop = topics_coords.label.values.tolist()
                perplexity = model.perplexity_
                top_topics = model.df_words_topics_

                return topics_coords, phi, totaltop, perplexity, top_topics

            tab1, tab2, tab3, tab4 = st.tabs(["📈 Generate visualization", "📃 Reference", "📓 Recommended Reading", "⬇️ Download Help"])
            with tab1:
                try:
                    with st.spinner('Performing computations. Please wait ...'):
                        topics_coords, phi, totaltop, perplexity, top_topics = biterm_topic(extype)
                        col1, col2 = st.columns([4,6])

                        @st.cache_data(ttl=3600)
                        def biterm_map(extype):
                            btmvis_coords = tmp.plot_scatter_topics(topics_coords, size_col='size', label_col='label', topic=numvis)
                            return btmvis_coords

                        @st.cache_data(ttl=3600)
                        def biterm_bar(extype):
                            terms_probs = tmp.calc_terms_probs_ratio(phi, topic=numvis, lambda_=1)
                            btmvis_probs = tmp.plot_terms(terms_probs, font_size=12)
                            return btmvis_probs

                        with col1:
                            st.write('Perplexity score: ', perplexity)
                            st.write('')
                            numvis = st.selectbox(
                                'Choose topic',
                                (totaltop), on_change=reset_biterm)
                            btmvis_coords = biterm_map(extype)
                            st.altair_chart(btmvis_coords)
                        with col2:
                            btmvis_probs = biterm_bar(extype)
                            st.altair_chart(btmvis_probs, use_container_width=True)

                        #===download results===#
                        resultcsv = top_topics.to_csv().encode("utf-8")
                        st.download_button(label="Download Results", data=resultcsv, file_name="results.csv", mime="text/csv", on_click="ignore")

                except ValueError as g:
                    st.error('🙇‍♂️ Please raise the number of topics and click submit')

                except NameError as f:
                    st.warning('🖱️ Please click Submit')

            with tab2:
                st.markdown('**Yan, X., Guo, J., Lan, Y., & Cheng, X. (2013, May 13). A biterm topic model for short texts. Proceedings of the 22nd International Conference on World Wide Web.** https://doi.org/10.1145/2488388.2488514')
            with tab3:
                st.markdown('**Cai, M., Shah, N., Li, J., Chen, W. H., Cuomo, R. E., Obradovich, N., & Mackey, T. K. (2020, August 26). Identification and characterization of tweets related to the 2015 Indiana HIV outbreak: A retrospective infoveillance study. PLOS ONE, 15(8), e0235150.** https://doi.org/10.1371/journal.pone.0235150')
                st.markdown('**Chen, Y., Dong, T., Ban, Q., & Li, Y. (2021). What Concerns Consumers about Hypertension? A Comparison between the Online Health Community and the Q&A Forum. International Journal of Computational Intelligence Systems, 14(1), 734.** https://doi.org/10.2991/ijcis.d.210203.002')
                st.markdown('**George, Crissandra J., "AMBIGUOUS APPALACHIANNESS: A LINGUISTIC AND PERCEPTUAL INVESTIGATION INTO ARC-LABELED PENNSYLVANIA COUNTIES" (2022). Theses and Dissertations-- Linguistics. 48.** https://doi.org/10.13023/etd.2022.217')
                st.markdown('**Li, J., Chen, W. H., Xu, Q., Shah, N., Kohler, J. C., & Mackey, T. K. (2020). Detection of self-reported experiences with corruption on twitter using unsupervised machine learning. Social Sciences & Humanities Open, 2(1), 100060.** https://doi.org/10.1016/j.ssaho.2020.100060')
            with tab4:
                st.subheader(':blue[Biterm]', anchor=False)
                st.text("Click the three dots at the top right then select the desired format.")
                st.markdown("![Downloading visualization](https://raw.githubusercontent.com/faizhalas/library-tools/main/images/download_biterm.jpg)")
                st.divider()
                st.subheader(':blue[Downloading CSV Results]', anchor=False)
                st.button("Download Results")
                st.text("Click Download results button at bottom of page")

        #===BERTopic===
        elif method == 'BERTopic':
            @st.cache_resource(ttl=3600, show_spinner=False)
            #@st.cache_data(ttl=3600, show_spinner=False)
            def bertopic_vis(extype):
                umap_model = UMAP(n_neighbors=bert_n_neighbors, n_components=bert_n_components,
                                  min_dist=0.0, metric='cosine', random_state=bert_random_state)
                cluster_model = KMeans(n_clusters=num_topic)
                if bert_embedding_model == 'all-MiniLM-L6-v2':
                    model = SentenceTransformer('all-MiniLM-L6-v2')
                    lang = 'en'
                    embeddings = model.encode(topic_abs, show_progress_bar=True)

                elif bert_embedding_model == 'en_core_web_sm':
                    nlp = en_core_web_sm.load(exclude=['tagger', 'parser', 'ner', 'attribute_ruler', 'lemmatizer'])
                    model = nlp
                    lang = 'en'
                    embeddings = np.array([nlp(text).vector for text in topic_abs])

                elif bert_embedding_model == 'paraphrase-multilingual-MiniLM-L12-v2':
                    model = SentenceTransformer('paraphrase-multilingual-MiniLM-L12-v2')
                    lang = 'multilingual'
                    embeddings = model.encode(topic_abs, show_progress_bar=True)

                representation_model = ""

                if fine_tuning:
                    keybert = KeyBERTInspired()
                    mmr = MaximalMarginalRelevance(diversity=0.3)
                    representation_model = {
                        "KeyBERT": keybert,
                        "MMR": mmr,
                    }
                    if topic_labelling:
                        if llm_model == "OpenAI/gpt-4o":
                            client = openai.OpenAI(api_key=api_key)
                            representation_model = {
                                "KeyBERT": keybert,
                                "MMR": mmr,
                                "test": OpenAI(client, model="gpt-4o-mini", delay_in_seconds=10)
                            }
                        elif llm_model == "Google/Flan-t5":
                            gen = pipeline("text2text-generation", model="google/flan-t5-base")
                            clientmod = TextGeneration(gen)
                            representation_model = {
                                "KeyBERT": keybert,
                                "MMR": mmr,
                                "test": clientmod
                            }
                        elif llm_model == "OpenAI/gpt-oss":
                            gen = pipeline("text-generation",
                                           model = "unsloth/gpt-oss-20b-BF16",
                                           torch_dtype = "auto",
                                           device_map = "auto",
                                           )
                            clientmod = TextGeneration(gen)

                            representation_model = {
                                "KeyBERT": keybert,
                                "MMR": mmr,
                                # hand BERTopic the TextGeneration wrapper, not the raw pipeline
                                "test": clientmod
                            }

                vectorizer_model = CountVectorizer(ngram_range=(1, xgram), stop_words='english')
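                # (a KMeans instance passed as hdbscan_model makes BERTopic form
                #  exactly num_topic clusters with no outlier topic)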
                topic_model = BERTopic(representation_model=representation_model, embedding_model=model, hdbscan_model=cluster_model, language=lang, umap_model=umap_model, vectorizer_model=vectorizer_model, top_n_words=bert_top_n_words)
                topics, probs = topic_model.fit_transform(topic_abs, embeddings=embeddings)

                if fine_tuning and topic_labelling:
                    generated_labels = [label[0][0].split("\n")[0] for label in topic_model.get_topics(full=True)["test"].values()]
                    topic_model.set_topic_labels(generated_labels)

                return topic_model, topics, probs, embeddings

            @st.cache_resource(ttl=3600, show_spinner=False)
            def Vis_Topics(extype):
                fig1 = topic_model.visualize_topics()
                return fig1

            @st.cache_resource(ttl=3600, show_spinner=False)
            def Vis_Documents(extype):
                fig2 = topic_model.visualize_document_datamap(topic_abs, embeddings=embeddings, custom_labels=True)
                return fig2

            @st.cache_resource(ttl=3600, show_spinner=False)
            def Vis_Hierarchy(extype):
                fig3 = topic_model.visualize_hierarchy(top_n_topics=num_topic, custom_labels=True)
                return fig3

            @st.cache_resource(ttl=3600, show_spinner=False)
            def Vis_Heatmap(extype):
                global topic_model
                fig4 = topic_model.visualize_heatmap(n_clusters=num_topic-1, width=1000, height=1000, custom_labels=True)
                return fig4

            @st.cache_resource(ttl=3600, show_spinner=False)
            def Vis_Barchart(extype):
                fig5 = topic_model.visualize_barchart(top_n_topics=num_topic, custom_labels=True)
                return fig5

            tab1, tab2, tab3, tab4 = st.tabs(["📈 Generate visualization", "📃 Reference", "📓 Recommended Reading", "⬇️ Download Help"])
            with tab1:
                try:
                    with st.spinner('Performing computations. Please wait ...'):

                        topic_model, topics, probs, embeddings = bertopic_vis(extype)
                        time.sleep(.5)
                        st.toast('Visualize Topics', icon='🏃')
                        fig1 = Vis_Topics(extype)

                        time.sleep(.5)
                        st.toast('Visualize Document', icon='🏃')
                        fig2 = Vis_Documents(extype)

                        time.sleep(.5)
                        st.toast('Visualize Document Hierarchy', icon='🏃')
                        fig3 = Vis_Hierarchy(extype)

                        time.sleep(.5)
                        st.toast('Visualize Topic Similarity', icon='🏃')
                        fig4 = Vis_Heatmap(extype)

                        time.sleep(.5)
                        st.toast('Visualize Terms', icon='🏃')
                        fig5 = Vis_Barchart(extype)

                        bertab1, bertab2, bertab3, bertab4, bertab5 = st.tabs(["Visualize Topics", "Visualize Terms", "Visualize Documents",
                                                                               "Visualize Document Hierarchy", "Visualize Topic Similarity"])

                        with bertab1:
                            st.plotly_chart(fig1, use_container_width=True)
                        with bertab2:
                            st.plotly_chart(fig5, use_container_width=True)
                        with bertab3:
                            st.plotly_chart(fig2, use_container_width=True)
                        with bertab4:
                            st.plotly_chart(fig3, use_container_width=True)
                        with bertab5:
                            st.plotly_chart(fig4, use_container_width=True)

                        #===download results===#
                        results = topic_model.get_topic_info()
                        resultf = pd.DataFrame(results)
                        resultcsv = resultf.to_csv().encode("utf-8")
                        st.download_button(
                            label="Download Results",
                            data=resultcsv,
                            file_name="results.csv",
                            mime="text/csv",
                            on_click="ignore",
                        )

                except ValueError as e:
                    st.write(e)
                    st.error('🙇‍♂️ Please raise the number of topics and click submit')

                except NameError as e:
                    st.warning('🖱️ Please click Submit')
                    st.write(e)

            with tab2:
                st.markdown('**Grootendorst, M. (2022). BERTopic: Neural topic modeling with a class-based TF-IDF procedure. arXiv preprint arXiv:2203.05794.** https://doi.org/10.48550/arXiv.2203.05794')

            with tab3:
                st.markdown('**Jeet Rawat, A., Ghildiyal, S., & Dixit, A. K. (2022, December 1). Topic modelling of legal documents using NLP and bidirectional encoder representations from transformers. Indonesian Journal of Electrical Engineering and Computer Science, 28(3), 1749.** https://doi.org/10.11591/ijeecs.v28.i3.pp1749-1755')
                st.markdown('**Yao, L. F., Ferawati, K., Liew, K., Wakamiya, S., & Aramaki, E. (2023, April 20). Disruptions in the Cystic Fibrosis Community’s Experiences and Concerns During the COVID-19 Pandemic: Topic Modeling and Time Series Analysis of Reddit Comments. Journal of Medical Internet Research, 25, e45249.** https://doi.org/10.2196/45249')

            with tab4:
                st.divider()
                st.subheader(':blue[BERTopic]', anchor=False)
                st.text("Click the camera icon on the top right menu")
                st.markdown("![Downloading visualization](https://raw.githubusercontent.com/faizhalas/library-tools/main/images/download_bertopic.jpg)")
                st.divider()
                st.subheader(':blue[Downloading CSV Results]', anchor=False)
                st.button("Download Results")
                st.text("Click Download results button at bottom of page")

    except Exception as e:
        st.error("Please ensure that your file is correct. Please contact us if you find that this is an error.", icon="🚨")
        st.write(e)
        st.stop()