jedick committed on
Commit
13753a4
·
1 Parent(s): 908a00f

Update sources text

Browse files
Files changed (1) hide show
  1. app.py +81 -46
app.py CHANGED
@@ -54,42 +54,33 @@ def prediction_to_df(prediction=None):
54
  my_theme = gr.Theme.from_hub("NoCrypt/miku")
55
  my_theme.set(body_background_fill="#FFFFFF", body_background_fill_dark="#000000")
56
 
 
 
 
 
 
 
 
 
 
 
 
57
  # Gradio interface setup
58
- with gr.Blocks(theme=my_theme) as demo:
59
 
60
  # Layout
61
  with gr.Row():
62
  with gr.Column(scale=3):
63
  with gr.Row():
64
- gr.Markdown(
65
- """
66
- # AI4citations
67
-
68
- ### Usage:
69
-
70
- 1. Input a **Claim**
71
- 2. Input **Evidence** statements
72
- - *Optional:* Upload a PDF and click Get Evidence
73
- """
74
- )
75
- gr.Markdown(
76
- """
77
- ## *AI-powered citation verification*
78
-
79
- ### To make predictions:
80
-
81
- - Hit 'Enter' in the **Claim** text box,
82
- - Hit 'Shift-Enter' in the **Evidence** text box, or
83
- - Click Get Evidence
84
- """
85
- )
86
  claim = gr.Textbox(
87
  label="1. Claim",
88
  info="aka hypothesis",
89
- placeholder="Input claim or use Get Claim from Text",
90
  )
91
  with gr.Row():
92
- with gr.Accordion("Get Evidence from PDF", open=True):
93
  pdf_file = gr.File(label="Upload PDF", type="filepath", height=120)
94
  get_evidence = gr.Button(value="Get Evidence")
95
  top_k = gr.Slider(
@@ -121,8 +112,8 @@ with gr.Blocks(theme=my_theme) as demo:
121
  y_lim=([0, 1]),
122
  visible=False,
123
  )
124
- label = gr.Label()
125
- with gr.Accordion("Settings", open=False):
126
  # Create dropdown menu to select the model
127
  dropdown = gr.Dropdown(
128
  choices=[
@@ -136,7 +127,7 @@ with gr.Blocks(theme=my_theme) as demo:
136
  label="Model",
137
  )
138
  radio = gr.Radio(["label", "barplot"], value="label", label="Results")
139
- with gr.Accordion("Examples", open=False):
140
  gr.Markdown("*Examples are run when clicked*"),
141
  with gr.Row():
142
  support_example = gr.Examples(
@@ -165,27 +156,71 @@ with gr.Blocks(theme=my_theme) as demo:
165
  )
166
  retrieval_example = gr.Examples(
167
  examples="examples/retrieval",
168
- label="Retrieval",
169
  inputs=[pdf_file, claim],
170
  example_labels=pd.read_csv("examples/retrieval/log.csv")[
171
  "label"
172
  ].tolist(),
173
  )
174
- gr.Markdown(
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
175
  """
176
- ### Sources
177
- - ML engineering project: [jedick/MLE-capstone-project](https://github.com/jedick/MLE-capstone-project)
178
- - App repository: [jedick/AI4citations](https://github.com/jedick/AI4citations)
179
- - Fine-tuned model: [jedick/DeBERTa-v3-base-mnli-fever-anli-scifact-citint](https://huggingface.co/jedick/DeBERTa-v3-base-mnli-fever-anli-scifact-citint)
180
- - Datasets used for fine-tuning
181
- - SciFact: [allenai/SciFact](https://github.com/allenai/scifact)
182
- - Citation-Integrity (CitInt): [ScienceNLP-Lab/Citation-Integrity](https://github.com/ScienceNLP-Lab/Citation-Integrity)
183
- - Base model: [MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli](https://huggingface.co/MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli)
184
- - Evidence retrieval: [xhluca/bm25s](https://github.com/xhluca/bm25s)
185
- - Gradio theme: [NoCrypt/miku](https://huggingface.co/spaces/NoCrypt/miku)
186
- - Examples: [MNLI (Poirot)](https://huggingface.co/datasets/nyu-mll/multi_nli/viewer/default/train?row=37&views%5B%5D=train), [CRISPR (evidence)](https://en.wikipedia.org/wiki/CRISPR)
187
- """
188
- )
189
 
190
  # Functions
191
 
@@ -206,9 +241,9 @@ with gr.Blocks(theme=my_theme) as demo:
206
  # Return two instances of the prediction to send to different Gradio components
207
  return prediction, prediction
208
 
209
- def use_model(model_name):
210
  """
211
- Use the specified model
212
  """
213
  global pipe, MODEL_NAME
214
  MODEL_NAME = model_name
@@ -353,7 +388,7 @@ with gr.Blocks(theme=my_theme) as demo:
353
 
354
  # Change the model the update the predictions
355
  dropdown.change(
356
- fn=use_model,
357
  inputs=dropdown,
358
  ).then(
359
  fn=query_model,
 
54
  my_theme = gr.Theme.from_hub("NoCrypt/miku")
55
  my_theme.set(body_background_fill="#FFFFFF", body_background_fill_dark="#000000")
56
 
57
+ # Custom CSS to center content
58
+ custom_css = """
59
+ .center-content {
60
+ text-align: center;
61
+ display:block;
62
+ }
63
+ """
64
+
65
+ # Define the HTML for Font Awesome
66
+ font_awesome_html = '<link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0-beta3/css/all.min.css" rel="stylesheet">'
67
+
68
  # Gradio interface setup
69
+ with gr.Blocks(theme=my_theme, css=custom_css, head=font_awesome_html) as demo:
70
 
71
  # Layout
72
  with gr.Row():
73
  with gr.Column(scale=3):
74
  with gr.Row():
75
+ gr.Markdown("# AI4citations")
76
+ gr.Markdown("## *AI-powered scientific citation verification*")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
77
  claim = gr.Textbox(
78
  label="1. Claim",
79
  info="aka hypothesis",
80
+ placeholder="Input claim",
81
  )
82
  with gr.Row():
83
+ with gr.Accordion("Get Evidence from PDF"):
84
  pdf_file = gr.File(label="Upload PDF", type="filepath", height=120)
85
  get_evidence = gr.Button(value="Get Evidence")
86
  top_k = gr.Slider(
 
112
  y_lim=([0, 1]),
113
  visible=False,
114
  )
115
+ label = gr.Label(label="Results")
116
+ with gr.Accordion("Settings"):
117
  # Create dropdown menu to select the model
118
  dropdown = gr.Dropdown(
119
  choices=[
 
127
  label="Model",
128
  )
129
  radio = gr.Radio(["label", "barplot"], value="label", label="Results")
130
+ with gr.Accordion("Examples"):
131
  gr.Markdown("*Examples are run when clicked*"),
132
  with gr.Row():
133
  support_example = gr.Examples(
 
156
  )
157
  retrieval_example = gr.Examples(
158
  examples="examples/retrieval",
159
+ label="Get Evidence from PDF",
160
  inputs=[pdf_file, claim],
161
  example_labels=pd.read_csv("examples/retrieval/log.csv")[
162
  "label"
163
  ].tolist(),
164
  )
165
+
166
+ # Sources and acknowledgments
167
+
168
+ with gr.Row():
169
+ with gr.Column(scale=3):
170
+ with gr.Row():
171
+ with gr.Column(scale=1):
172
+ gr.Markdown(
173
+ """
174
+ ### Usage:
175
+
176
+ 1. Input a **Claim**
177
+ 2. Input **Evidence** statements
178
+ - *Optional:* Upload a PDF and click Get Evidence
179
+ """
180
+ )
181
+ with gr.Column(scale=2):
182
+ gr.Markdown(
183
+ """
184
+ ### To make predictions:
185
+
186
+ - Hit 'Enter' in the **Claim** text box,
187
+ - Hit 'Shift-Enter' in the **Evidence** text box, or
188
+ - Click Get Evidence
189
+ """
190
+ )
191
+
192
+ with gr.Column(scale=2, elem_classes=["center-content"]):
193
+ with gr.Accordion("Sources", open=False):
194
+ gr.Markdown(
195
+ """
196
+ #### *Capstone project*
197
+ - <i class="fa-brands fa-github"></i> [jedick/MLE-capstone-project](https://github.com/jedick/MLE-capstone-project) (project repo)
198
+ - <i class="fa-brands fa-github"></i> [jedick/AI4citations](https://github.com/jedick/AI4citations) (app repo)
199
+ """
200
+ )
201
+ gr.Markdown(
202
+ """
203
+ #### *Models*
204
+ - <img src="https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.svg" style="height: 1.2em; display: inline-block;"> [jedick/DeBERTa-v3-base-mnli-fever-anli-scifact-citint](https://huggingface.co/jedick/DeBERTa-v3-base-mnli-fever-anli-scifact-citint) (fine-tuned)
205
+ - <img src="https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.svg" style="height: 1.2em; display: inline-block;"> [MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli](https://huggingface.co/MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli) (base)
206
+ """
207
+ )
208
+ gr.Markdown(
209
+ """
210
+ #### *Datasets for fine-tuning*
211
+ - <i class="fa-brands fa-github"></i> [allenai/SciFact](https://github.com/allenai/scifact) (SciFact)
212
+ - <i class="fa-brands fa-github"></i> [ScienceNLP-Lab/Citation-Integrity](https://github.com/ScienceNLP-Lab/Citation-Integrity) (CitInt)
213
  """
214
+ )
215
+ gr.Markdown(
216
+ """
217
+ #### *Other sources*
218
+ - <i class="fa-brands fa-github"></i> [xhluca/bm25s](https://github.com/xhluca/bm25s) (evidence retrieval)
219
+ - <img src="https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.svg" style="height: 1.2em; display: inline-block;"> [nyu-mll/multi_nli](https://huggingface.co/datasets/nyu-mll/multi_nli/viewer/default/train?row=37&views%5B%5D=train) (MNLI example)
220
+ - <img src="https://plos.org/wp-content/uploads/2020/01/logo-color-blue.svg" style="height: 1.4em; display: inline-block;"> [Medicine](https://doi.org/10.1371/journal.pmed.0030197), <i class="fa-brands fa-wikipedia-w"></i> [CRISPR](https://en.wikipedia.org/wiki/CRISPR) (get evidence examples)
221
+ - <img src="https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.svg" style="height: 1.2em; display: inline-block;"> [NoCrypt/miku](https://huggingface.co/spaces/NoCrypt/miku) (theme)
222
+ """
223
+ )
 
 
 
224
 
225
  # Functions
226
 
 
241
  # Return two instances of the prediction to send to different Gradio components
242
  return prediction, prediction
243
 
244
+ def select_model(model_name):
245
  """
246
+ Select the specified model
247
  """
248
  global pipe, MODEL_NAME
249
  MODEL_NAME = model_name
 
388
 
389
  # Change the model, then update the predictions
390
  dropdown.change(
391
+ fn=select_model,
392
  inputs=dropdown,
393
  ).then(
394
  fn=query_model,