|
import atexit |
|
import base64 |
|
import io |
|
import json |
|
import os |
|
import re |
|
import tempfile |
|
import threading |
|
import time |
|
import uuid |
|
import zipfile |
|
from pathlib import Path |
|
|
|
import gradio as gr |
|
import requests |
|
from PIL import Image |
|
|
|
|
|
|
|
# Endpoint of the layout-parsing inference API; the app fails fast at import
# time if it is not configured.
API_URL = os.environ["API_URL"]

# Token sent in the Authorization header of every API request.
TOKEN = os.environ["API_TOKEN"]

# Inline the header logo as a base64 data URI so no static file route is needed.
LOGO_PATH = Path(__file__).parent / "pp-structurev3.png"

with open(LOGO_PATH, "rb") as image_file:
    LOGO_BASE64 = (
        f"data:image/png;base64,{base64.b64encode(image_file.read()).decode('utf-8')}"
    )

# Scratch directory for generated ZIP downloads; cleaned up on interpreter exit.
TEMP_DIR = tempfile.TemporaryDirectory()

atexit.register(TEMP_DIR.cleanup)

# Gradio theme shared by the Blocks app defined below.
paddle_theme = gr.themes.Soft(
    font=["Roboto", "Open Sans", "Arial", "sans-serif"],
    font_mono=["Fira Code", "monospace"],
)
|
|
|
|
|
# Global stylesheet injected into the Gradio app.
# Fix: the original string ended with a stray extra "}" after the final rule,
# leaving the brace count unbalanced (invalid CSS); it has been removed.
CSS = """
:root {
    --sand-color: #FAF9F6;
    --white: #ffffff;
    --shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
    --text-color: #F3F4F7;
    --black:#000000;
    --link-hover: #2b6cb0;
    --content-width: 1200px;
}
body {
    display: flex;
    justify-content: center;
    background-color: var(--sand-color);
    color: var(--text-color);
    font-family: Arial, sans-serif;
}
.upload-section {
    width: 100%;
    margin: 0 auto 30px;
    padding: 20px;
    background-color: var(--sand-color) !important;
    border-radius: 8px;
    box-shadow: var(--shadow);
}
.center-content {
    display: flex;
    flex-direction: column;
    align-items: center;
    text-align: center;
    margin-bottom: 20px;
}
.header {
    margin-bottom: 30px;
    width: 100%;
}
.logo-container {
    width: 100%;
    margin-bottom: 20px;
}
.logo-img {
    width: 100%;
    max-width: var(--content-width);
    margin: 0 auto;
    display: block;
}
.nav-bar {
    display: flex;
    justify-content: center;
    background-color: var(--white);
    padding: 15px 0;
    box-shadow: var(--shadow);
    margin-bottom: 20px;
}
.nav-links {
    display: flex;
    gap: 30px;
    width: 100%;
    justify-content: center;
}
.nav-link {
    color: var(--black);
    text-decoration: none;
    font-weight: bold;
    font-size: 24px;
    transition: color 0.2s;
}
.nav-link:hover {
    color: var(--link-hover);
    text-decoration: none;
}
button {
    background-color: var(--text-color) !important;
    color: var(--black) !important;
    border: none !important;
    border-radius: 4px;
    padding: 8px 16px;
}
.file-download {
    margin-top: 15px !important;
}
.loader {
    border: 5px solid #f3f3f3;
    border-top: 5px solid #3498db;
    border-radius: 50%;
    width: 50px;
    height: 50px;
    animation: spin 1s linear infinite;
    margin: 20px auto;
}
@keyframes spin {
    0% { transform: rotate(0deg); }
    100% { transform: rotate(360deg); }
}
.loader-container {
    text-align: center;
    margin: 20px 0;
}
.loader-container-prepare {
    text-align: left;
    margin: 20px 0;
}
.bold-label .gr-radio {
    margin-top: 8px;
    background-color: var(--white);
    padding: 10px;
    border-radius: 4px;
}
.bold-label .gr-radio label {
    font-size: 14px;
    color: var(--black);
}
#analyze-btn {
    background-color: #FF5722 !important;
    color: white !important;
    transition: all 0.3s ease !important;
    box-shadow: 0 2px 5px rgba(0,0,0,0.2) !important;
    bottom: 1% !important;
    left: 3% !important;
    z-index: 1000 !important;
}
#unzip-btn {
    background-color: #4CAF50 !important;
    color: white !important;
    transition: all 0.3s ease !important;
    box-shadow: 0 2px 5px rgba(0,0,0,0.2) !important;
    bottom: 1% !important;
    left: 18% !important;
    z-index: 1000 !important;
}
#download_file {
    position: fixed !important;
    bottom: 1% !important;
    left: 22% !important;
    z-index: 1000 !important;
}
#analyze-btn:hover,#unzip-btn:hover{
    transform: translateY(-3px) !important;
    box-shadow: 0 4px 8px rgba(0,0,0,0.3) !important;
}
.square-pdf-btn {
    width: 90% !important;
    height: 3% !important;
    padding: 0 !important;
    display: flex !important;
    flex-direction: column !important;
    align-items: center !important;
    justify-content: center !important;
    gap: 8px !important;
}
.square-pdf-btn img {
    width: 20% !important;
    height: 1% !important;
    margin: 0 !important;
}
.square-pdf-btn span {
    font-size: 14px !important;
    text-align: center !important;
}
.gradio-gallery-item:hover {
    background-color: transparent !important;
    filter: none !important;
    transform: none !important;
}
.custom-markdown h3 {
    font-size: 25px !important;
}
.tight-spacing {
    margin-bottom: -5px !important;
}
.tight-spacing-as {
    margin-top: 0px !important;
    margin-bottom: 0px !important;
}
.image-container img {
    display: inline-block !important;
}
"""
|
# Fixed number of per-page component slots pre-allocated in the UI.
MAX_NUM_PAGES = 10

# Exported ZIPs older than this many seconds are removed by the sweeper thread.
TMP_DELETE_TIME = 900

# Seconds the sweeper thread sleeps between cleanup passes.
THREAD_WAKEUP_TIME = 600

# Sample documents shown in the gr.Examples gallery.
# NOTE(review): some names look misspelled ("muti-column", "janpan-paper",
# capitalized "Handwritten") — they must match the actual files under
# examples/, so verify on disk before renaming anything.
EXAMPLES_TEST = [
    "examples/chinese-formula.jpg",
    "examples/chemical-equation.jpg",
    "examples/formula-chart.jpg",
    "examples/table.jpg",
    "examples/complex-formula.jpg",
    "examples/complex-typeset.jpg",
    "examples/muti-column.jpg",
    "examples/Handwritten.jpg",
    "examples/janpan-paper.jpg",
    "examples/vertical-text.jpg",
    "examples/tradition-chinese.jpg",
]
|
|
|
# Tooltip text for every pipeline option exposed in the UI.
# Keys ending in "_nb" belong to number inputs; gen_tooltip_radio() expands
# each key into the per-widget ids ("*_rd"/"*_md" or "*_nb"/"*_md") used by
# the interface.
DESC_DICT = {
    "concatenate_pages": "Whether to merge pages",
    "use_formula_recognition": "Whether to use formula recognition subpipeline. If used, the formula can be converted into Latex code. Otherwise, the formula part is the text recognition result.",
    "use_chart_recognition": "Use the PP-Chart2Table model to parse and convert the charts in the document into tables.",
    "use_doc_orientation_classify": "Whether to use the document image orientation classification module. After use, you can correct distorted images, such as wrinkles, tilts, etc.",
    "use_doc_unwarping": "Whether to use the document unwarping module. After use, you can correct distorted images, such as wrinkles, tilts, etc.",
    "use_textline_orientation": "Whether to use the text line orientation classification module to support the distinction and correction of text lines of 0 degrees and 180 degrees.",
    "use_region_detection": "Whether to use the layout region detection. After using it, it can handle complex layouts such as newspapers and magazines.",
    "use_seal_recognition": "Whether to use seal text recognition subpipeline. After use, the seal text content in the document can be extracted.",
    "use_table_recognition": "Whether to use table recognition subpipeline. If used, the table can be identified as a structured format (such as HTML). Otherwise, the table will be regarded as figure.",
    "layout_threshold_nb": "The threshold used to filter out low confidence prediction results for the layout region, ranging from 0 to 1. If there are missed regions, this value can be appropriately lowered.",
    "layout_nms": "Whether to use layout region detection model uses NMS post-processing. After using it, nested boxes or those with large intersections can be removed.",
    "layout_unclip_ratio_nb": "Use this method to expand each region of the layout. The larger the value, the larger the expanded region.",
    "text_det_limit_type": "[Short side] means to ensure that the shortest side of the image is not less than [Image side length limit for text detection], and [Long side] means to ensure that the longest side of the image is not greater than [Image side length limit for text detection].",
    "text_det_limit_side_len_nb": "For the side length limit of the text detection input image, for large images with dense text, if you want more accurate recognition, you should choose a larger size. This parameter is used in conjunction with the [Image side length limit type for text detection]. Generally, the maximum [Long side] is suitable for scenes with large images and text, and the minimum [Short side] is suitable for document scenes with small and dense images.",
    "text_det_thresh_nb": "In the output probability map, only pixels with scores greater than the threshold are considered text pixels, and the value range is 0~1.",
    "text_det_box_thresh_nb": "When the average score of all pixels in the detection result border is greater than the threshold, the result will be considered as a text area, and the value range is 0 to 1. If missed detection occurs, this value can be appropriately lowered.",
    "text_det_unclip_ratio_nb": "Use this method to expand the text area. The larger the value, the larger the expanded area.",
    "text_rec_score_thresh_nb": "After text detection, the text box performs text recognition, and the text results with scores greater than the threshold will be retained. The value range is 0~1.",
    "seal_det_limit_type": "[Short side] means ensuring that the shortest side of the image is not less than [Image side length limit for seal text recognition], and [Long side] means ensuring that the longest side of the image is not greater than [Image side length limit for seal text recognition].",
    "seal_det_limit_side_len_nb": "For the side length limit of the input image for seal text detection, for large images with dense text, if you want more accurate recognition, you should choose a larger size. This parameter is used in conjunction with [Image side length limit type for seal text detection]. Generally, the maximum [Long side] is suitable for scenes with large images and text, and the minimum [Short side] is suitable for document scenes with small and dense images and text.",
    "seal_det_thresh_nb": "In the output probability map, only pixels with scores greater than the threshold are considered text pixels, and the value range is 0~1.",
    "seal_det_box_thresh_nb": "When the average score of all pixels within the detection result border is greater than the threshold, the result will be considered as a text area, and the value range is 0~1.",
    "seal_det_unclip_ratio_nb": "Use this method to expand the seal text area. The larger the value, the larger the expanded area.",
    "seal_rec_score_thresh_nb": "After the seal text is detected, the text box is subjected to text recognition. The text results with scores greater than the threshold will be retained. The value range is 0~1.",
    "use_ocr_results_with_table_cells": "Whether to enable the cell OCR mode. If not enabled, the global OCR result is used to fill the HTML table. If enabled, OCR is performed cell by cell and filled into the HTML table (which will increase the time consumption).",
    "use_e2e_wired_table_rec_model": "Whether to enable the wired table end-to-end prediction mode. If not enabled, the table cell detection model prediction results are used to fill the HTML table. If enabled, the end-to-end table structure recognition model cell prediction results are used to fill the HTML table.",
    "use_e2e_wireless_table_rec_model": "Whether to enable the wireless table end-to-end prediction mode. If not enabled, the table cell detection model prediction results are used to fill the HTML table. If enabled, the end-to-end table structure recognition model cell prediction results are used to fill the HTML table.",
    "use_wired_table_cells_trans_to_html": "The wired table cell detection results are directly converted to HTML. The wired table structure recognition model is no longer used to predict the HTML structure. Instead, HTML is directly constructed based on the geometric relationship of the wired table cell detection results.",
    "use_wireless_table_cells_trans_to_html": "The wireless table cell detection results are directly converted to HTML. The wireless table structure recognition model is no longer used to predict the HTML structure. Instead, HTML is directly constructed based on the geometric relationship of the wireless table cell detection results.",
    "use_table_orientation_classify": "Using table orientation classification, when the table in the image is rotated 90/180/270 degrees, the orientation can be corrected and the table recognition can be completed correctly.",
}

# Maps exported ZIP path -> creation timestamp; entries are purged by
# delete_file_periodically(). Access should be guarded by ``lock``.
tmp_time = {}

lock = threading.Lock()
|
|
|
|
|
def gen_tooltip_radio(desc_dict):
    """Expand option descriptions into per-widget tooltip entries.

    Keys ending in "_nb" (number inputs) produce "<base>_nb" and "<base>_md"
    entries; every other key produces "<key>_rd" and "<key>_md". All expanded
    keys share the original description text.
    """
    tooltip = {}
    for option_key, description in desc_dict.items():
        if option_key.endswith("_nb"):
            base = option_key[: -len("_nb")]
            widget_suffixes = ("_nb", "_md")
        else:
            base = option_key
            widget_suffixes = ("_rd", "_md")
        for widget_suffix in widget_suffixes:
            tooltip[base + widget_suffix] = description
    return tooltip
|
|
|
|
|
# Widget-id -> tooltip text, derived from DESC_DICT (see gen_tooltip_radio).
TOOLTIP_RADIO = gen_tooltip_radio(DESC_DICT)
|
|
|
|
|
def url_to_bytes(url, *, timeout=10):
    """Fetch *url* via HTTP GET and return the raw response body.

    Raises requests.HTTPError for non-2xx responses (via raise_for_status)
    and the usual requests exceptions on network failure or timeout.
    """
    http_response = requests.get(url, timeout=timeout)
    http_response.raise_for_status()
    return http_response.content
|
|
|
|
|
def bytes_to_image(image_bytes):
    """Decode raw image bytes into a PIL Image object."""
    buffer = io.BytesIO(image_bytes)
    return Image.open(buffer)
|
|
|
|
|
def embed_images_into_markdown_text(markdown_text, markdown_images):
    """Rewrite <img src="..."> tags from local paths to their hosted URLs.

    ``markdown_images`` maps each local image path to the URL that should
    replace it; the text is returned with every matching src substituted.
    """
    rewritten = markdown_text
    for local_path, hosted_url in markdown_images.items():
        rewritten = rewritten.replace(
            f'<img src="{local_path}"', f'<img src="{hosted_url}"'
        )
    return rewritten
|
|
|
|
|
|
|
def concatenate_markdown_pages(markdown_list):
    """Merge per-page markdown dicts into a single document string.

    Each entry must provide "text", "isStart" and "isEnd". A page whose
    paragraph continues from the previous page (its "isStart" is False and
    the previous page's "isEnd" was False) is joined inline — with a space,
    unless either side of the join is a CJK character. Otherwise pages are
    separated by a blank line.
    """
    cjk_pattern = re.compile(r"[\u4e00-\u9fff]")
    merged = ""
    prev_page_ended_paragraph = True

    for page in markdown_list:
        starts_paragraph = bool(page["isStart"])
        page_text = page["text"]

        if not starts_paragraph and not prev_page_ended_paragraph:
            # Paragraph straddles the page break: join inline.
            tail_char = merged[-1] if merged else ""
            tail_is_cjk = bool(cjk_pattern.match(tail_char)) if tail_char else False
            head_is_cjk = bool(cjk_pattern.match(page_text)) if page_text else False
            # CJK text is written without inter-word spaces.
            joiner = "" if (tail_is_cjk or head_is_cjk) else " "
            merged += joiner + page_text
        else:
            merged += "\n\n" + page_text

        prev_page_ended_paragraph = page["isEnd"]

    return merged
|
|
|
|
|
def process_file(
    file_path,
    image_input,
    use_formula_recognition,
    use_chart_recognition,
    use_doc_orientation_classify,
    use_doc_unwarping,
    use_textline_orientation,
    use_region_detection,
    use_seal_recognition,
    use_table_recognition,
    layout_threshold,
    layout_nms,
    layout_unclip_ratio,
    text_det_limit_type,
    text_det_limit_side_len,
    text_det_thresh,
    text_det_box_thresh,
    text_det_unclip_ratio,
    text_rec_score_thresh,
    seal_det_limit_type,
    seal_det_limit_side_len,
    seal_det_thresh,
    seal_det_box_thresh,
    seal_det_unclip_ratio,
    seal_rec_score_thresh,
    use_ocr_results_with_table_cells,
    use_e2e_wired_table_rec_model,
    use_e2e_wireless_table_rec_model,
    use_wired_table_cells_trans_to_html,
    use_wireless_table_cells_trans_to_html,
    use_table_orientation_classify,
):
    """Send the uploaded document to the layout-parsing API and collect results.

    Either ``file_path`` (PDF or image) or ``image_input`` (image) must be
    provided; ``file_path`` takes precedence when both are set. The remaining
    arguments mirror the pipeline options and are forwarded as camelCase keys
    in the JSON payload.

    Returns a dict bundling the input file info, per-page visualization
    images (raw bytes), per-page markdown text and images, the concatenated
    markdown document, gallery URLs and the raw API response.

    Raises gr.Error on any failure (missing input, HTTP error, bad payload).
    """
    try:
        if not file_path and not image_input:
            raise ValueError("Please upload a file first")
        if file_path:
            file_type = "pdf" if Path(file_path).suffix == ".pdf" else "image"
        else:
            # Fall back to the image component's upload.
            file_path = image_input
            file_type = "image"

        with open(file_path, "rb") as f:
            file_bytes = f.read()

        # The API expects the file content base64-encoded in the JSON body.
        file_data = base64.b64encode(file_bytes).decode("ascii")
        headers = {
            "Authorization": f"token {TOKEN}",
            "Content-Type": "application/json",
        }

        response = requests.post(
            API_URL,
            json={
                "file": file_data,
                "fileType": 0 if file_type == "pdf" else 1,
                "useFormulaRecognition": use_formula_recognition,
                "useChartRecognition": use_chart_recognition,
                "useDocOrientationClassify": use_doc_orientation_classify,
                "useDocUnwarping": use_doc_unwarping,
                "useTextlineOrientation": use_textline_orientation,
                "useSealRecognition": use_seal_recognition,
                "useRegionDetection": use_region_detection,
                "useTableRecognition": use_table_recognition,
                "layoutThreshold": layout_threshold,
                "layoutNms": layout_nms,
                "layoutUnclipRatio": layout_unclip_ratio,
                "textDetLimitType": text_det_limit_type,
                # Fixed key: was "textTetLimitSideLen", which deviates from
                # the camelCase of the parameter name and would be ignored
                # by the server, silently dropping this setting.
                "textDetLimitSideLen": text_det_limit_side_len,
                "textDetThresh": text_det_thresh,
                "textDetBoxThresh": text_det_box_thresh,
                "textDetUnclipRatio": text_det_unclip_ratio,
                "textRecScoreThresh": text_rec_score_thresh,
                "sealDetLimitType": seal_det_limit_type,
                "sealDetLimitSideLen": seal_det_limit_side_len,
                "sealDetThresh": seal_det_thresh,
                "sealDetBoxThresh": seal_det_box_thresh,
                "sealDetUnclipRatio": seal_det_unclip_ratio,
                "sealRecScoreThresh": seal_rec_score_thresh,
                "useOcrResultsWithTableCells": use_ocr_results_with_table_cells,
                "useE2eWiredTableRecModel": use_e2e_wired_table_rec_model,
                "useE2eWirelessTableRecModel": use_e2e_wireless_table_rec_model,
                "useWiredTableCellsTransToHtml": use_wired_table_cells_trans_to_html,
                # Fixed key: was "useWirelessWableCellsTransToHtml" (typo).
                "useWirelessTableCellsTransToHtml": use_wireless_table_cells_trans_to_html,
                "useTableOrientationClassify": use_table_orientation_classify,
            },
            headers=headers,
            timeout=1000,
        )
        try:
            response.raise_for_status()
        except requests.exceptions.RequestException as e:
            raise RuntimeError("API request failed") from e

        result = response.json()
        layout_results = result.get("result", {}).get("layoutParsingResults", [])
        layout_ordering_images = []
        layout_det_res_images = []
        overall_ocr_res_images = []
        output_json = result.get("result", {})
        markdown_texts = []
        markdown_images = []
        markdown_content_list = []
        input_images = []
        input_images_gallery = []
        for res in layout_results:
            # Download the server-side visualization images as raw bytes.
            layout_ordering_images.append(
                url_to_bytes(res["outputImages"]["layout_order_res"])
            )
            layout_det_res_images.append(
                url_to_bytes(res["outputImages"]["layout_det_res"])
            )
            overall_ocr_res_images.append(
                url_to_bytes(res["outputImages"]["overall_ocr_res"])
            )
            markdown = res["markdown"]
            markdown_text = markdown["text"]
            markdown_texts.append(markdown_text)
            img_path_to_url = markdown["images"]
            # Fetch every markdown-embedded image so it can be zipped later.
            img_path_to_bytes = {}
            for path, url in img_path_to_url.items():
                img_path_to_bytes[path] = url_to_bytes(url)
            markdown_images.append(img_path_to_bytes)
            input_images.append(url_to_bytes(res["inputImage"]))
            input_images_gallery.append(res["inputImage"])
            # For display, point <img> tags at the hosted URLs instead of
            # the local relative paths.
            markdown_content = embed_images_into_markdown_text(
                markdown_text, img_path_to_url
            )
            markdown_content_list.append(markdown_content)

        # Build the merged document from the URL-embedded markdown pages.
        markdown_list = []
        for res, cont in zip(layout_results, markdown_content_list):
            markdown = res["markdown"].copy()
            markdown["text"] = cont
            markdown_list.append(markdown)
        concatenated_markdown_content = concatenate_markdown_pages(markdown_list)

        return {
            "original_file": file_path,
            "file_type": file_type,
            "layout_ordering_images": layout_ordering_images,
            "layout_det_res_images": layout_det_res_images,
            "overall_ocr_res_images": overall_ocr_res_images,
            "output_json": output_json,
            "markdown_texts": markdown_texts,
            "markdown_images": markdown_images,
            "markdown_content_list": markdown_content_list,
            "concatenated_markdown_content": concatenated_markdown_content,
            "input_images": input_images,
            "input_images_gallery": input_images_gallery,
            "api_response": result,
        }
    except Exception as e:
        raise gr.Error(f"Error processing file: {str(e)}")
|
|
|
|
|
def export_full_results(results):
    """Bundle every analysis artifact into a ZIP and return its path.

    The archive contains the three visualization image sets, the parsed JSON
    output, per-page markdown (plus embedded images), the raw API response
    and the input page images. The ZIP path is registered in ``tmp_time``
    (under ``lock``) so the background sweeper can delete it later.

    Raises gr.Error if ``results`` is empty or archiving fails.
    """
    try:
        if not results:
            raise ValueError("No results to export")

        stem = Path(results["original_file"]).stem
        zip_path = Path(TEMP_DIR.name, f"{stem}_{uuid.uuid4().hex}.zip")

        with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as archive:
            image_sets = (
                "layout_ordering_images",
                "layout_det_res_images",
                "overall_ocr_res_images",
            )
            for folder in image_sets:
                for page_no, img_bytes in enumerate(results[folder], start=1):
                    archive.writestr(f"{folder}/page_{page_no}.jpg", img_bytes)

            archive.writestr(
                "output.json",
                json.dumps(results["output_json"], indent=2, ensure_ascii=False),
            )

            markdown_pages = zip(results["markdown_texts"], results["markdown_images"])
            for page_no, (md_text, md_imgs) in enumerate(markdown_pages, start=1):
                archive.writestr(f"markdown/page_{page_no}.md", md_text)
                for img_path, img_bytes in md_imgs.items():
                    archive.writestr(f"markdown/{img_path}", img_bytes)

            archive.writestr(
                "api_response.json",
                json.dumps(
                    results.get("api_response", {}), indent=2, ensure_ascii=False
                ),
            )

            for page_no, img_bytes in enumerate(results["input_images"], start=1):
                archive.writestr(f"input_images/page_{page_no}.jpg", img_bytes)

        # Register for delayed deletion by the sweeper thread.
        with lock:
            tmp_time[zip_path] = time.time()
        return str(zip_path)

    except Exception as e:
        raise gr.Error(f"Error creating ZIP file: {str(e)}")
|
|
|
|
|
def on_file_change(file):
    """Show the chosen file's name in the status textbox, or reset it."""
    if not file:
        return gr.Textbox()
    chosen_name = os.path.basename(file.name)
    return gr.Textbox(value=f"✅ Chosen file: {chosen_name}", visible=True)
|
|
|
|
|
def clear_file_selection():
    """Reset both the file picker and the selected-file textbox."""
    cleared_file = gr.File(value=None)
    cleared_label = gr.Textbox(value=None)
    return cleared_file, cleared_label
|
|
|
|
|
def clear_file_selection_examples(image_input):
    """Clear any uploaded file and show the picked example's filename."""
    chosen = os.path.basename(image_input)
    label = "✅ Chosen file: " + chosen
    return gr.File(value=None), gr.Textbox(value=label, visible=True)
|
|
|
|
|
def toggle_sections(choice):
    """Show only the option column matching the selected category.

    The column components referenced here are module-level objects created
    in the gr.Blocks section below.
    """
    section_labels = {
        Module_Options: "Module Options",
        Subpipeline_Options: "Subpipeline Options",
        Layout_region_detection_Options: "Layout region detection Options",
        Text_detection_Options: "Text detection Options",
        Seal_text_recognition_Options: "Seal text recognition Options",
        Table_recognition_Options: "Table recognition Options",
    }
    return {
        column: gr.Column(visible=(choice == label))
        for column, label in section_labels.items()
    }
|
|
|
|
|
|
|
def toggle_spinner():
    """Show the busy spinner and hide the result widgets while processing."""
    spinner_visible = gr.Column(visible=True)
    results_hidden = gr.Column(visible=False)
    download_hidden = gr.File(visible=False)
    extra_hidden_a = gr.update(visible=False)
    extra_hidden_b = gr.update(visible=False)
    return spinner_visible, results_hidden, download_hidden, extra_hidden_a, extra_hidden_b
|
|
|
|
|
def hide_spinner():
    """Hide the busy spinner and reveal the previously hidden output."""
    spinner_hidden = gr.Column(visible=False)
    output_visible = gr.update(visible=True)
    return spinner_hidden, output_visible
|
|
|
|
|
def update_display(results, concatenate_pages):
    """Map analysis results onto the fixed grid of UI components.

    Returns a flat list of component updates — layout-order images,
    layout-detection images, OCR images, the JSON view, the markdown pages,
    then the thumbnail galleries — which must match the ``outputs=`` order
    of the triggering event handler. Depends on the module-level
    ``gallery_list`` created in the gr.Blocks section defined after this
    function.
    """
    if not results:
        return gr.skip()

    # The UI pre-allocates exactly MAX_NUM_PAGES slots per image type, so
    # the API result must never exceed that page count.
    assert len(results["layout_ordering_images"]) <= MAX_NUM_PAGES, len(
        results["layout_ordering_images"]
    )
    assert len(results["layout_det_res_images"]) <= MAX_NUM_PAGES, len(
        results["layout_det_res_images"]
    )
    assert len(results["overall_ocr_res_images"]) <= MAX_NUM_PAGES, len(
        results["overall_ocr_res_images"]
    )
    assert len(results["input_images_gallery"]) <= MAX_NUM_PAGES, len(
        results["input_images_gallery"]
    )
    # Every thumbnail gallery shows the same input-page previews.
    gallery_list_imgs = []
    for i in range(len(gallery_list)):
        gallery_list_imgs.append(
            gr.Gallery(
                value=results["input_images_gallery"],
                rows=len(results["input_images_gallery"]),
            )
        )

    # Fill the used slots with images, then hide the unused remainder.
    layout_order_imgs = []
    for img in results["layout_ordering_images"]:
        layout_order_imgs.append(gr.Image(value=bytes_to_image(img), visible=True))
    for _ in range(len(results["layout_ordering_images"]), MAX_NUM_PAGES):
        layout_order_imgs.append(gr.Image(visible=False))

    layout_det_imgs = []
    for img in results["layout_det_res_images"]:
        layout_det_imgs.append(gr.Image(value=bytes_to_image(img), visible=True))
    for _ in range(len(results["layout_det_res_images"]), MAX_NUM_PAGES):
        layout_det_imgs.append(gr.Image(visible=False))

    ocr_imgs = []
    for img in results["overall_ocr_res_images"]:
        ocr_imgs.append(gr.Image(value=bytes_to_image(img), visible=True))
    for _ in range(len(results["overall_ocr_res_images"]), MAX_NUM_PAGES):
        ocr_imgs.append(gr.Image(visible=False))

    # NOTE(review): a dict is passed as a gr.Markdown value here — confirm
    # the target component renders it as intended (gr.JSON may be a better fit).
    output_json = [gr.Markdown(value=results["output_json"], visible=True)]

    if concatenate_pages:
        # Page merging on: one merged document in slot 0, hide the rest.
        markdown_content = results["concatenated_markdown_content"]
        ret_cont = [gr.Markdown(value=markdown_content, visible=True)]
        for _ in range(1, MAX_NUM_PAGES):
            ret_cont.append(gr.Markdown(visible=False))
    else:
        assert len(results["markdown_content_list"]) <= MAX_NUM_PAGES, len(
            results["markdown_content_list"]
        )
        ret_cont = []
        for cont in results["markdown_content_list"]:
            ret_cont.append(gr.Markdown(value=cont, visible=True))
        for _ in range(len(results["markdown_content_list"]), MAX_NUM_PAGES):
            ret_cont.append(gr.Markdown(visible=False))
    return (
        layout_order_imgs
        + layout_det_imgs
        + ocr_imgs
        + output_json
        + ret_cont
        + gallery_list_imgs
    )
|
|
|
|
|
def update_image(evt: gr.SelectData):
    """Show only the page image whose gallery thumbnail was clicked."""
    return [
        gr.Image(visible=(slot == evt.index)) for slot in range(MAX_NUM_PAGES)
    ]
|
|
|
|
|
def update_markdown(concatenate_pages, evt: gr.SelectData):
    """Sync the visible markdown slot with the clicked thumbnail.

    When page merging is enabled there is only one merged markdown view, so
    slot 0 is shown and a warning explains that navigation is disabled.
    """
    if concatenate_pages:
        gr.Warning(
            "When page merging is on, the thumbnail-to-page linking is disabled. If you want to navigate to the corresponding page when clicking on the thumbnail, please turn off page merging."
        )
        return [
            gr.Markdown(visible=(slot == 0)) for slot in range(MAX_NUM_PAGES)
        ]
    return [
        gr.Markdown(visible=(slot == evt.index)) for slot in range(MAX_NUM_PAGES)
    ]
|
|
|
|
|
def delete_file_periodically():
    """Background worker that expires old temporary files.

    Every ``THREAD_WAKEUP_TIME`` seconds, scans the module-level
    ``tmp_time`` map (filename -> creation timestamp), deletes files
    older than ``TMP_DELETE_TIME`` seconds, and drops their entries
    under ``lock``. Runs forever; intended to be started in a daemon
    thread.
    """
    global tmp_time
    while True:
        current_time = time.time()
        expired = []
        # Snapshot the items so concurrent inserts don't break iteration.
        for filename, start_time in list(tmp_time.items()):
            if (current_time - start_time) >= TMP_DELETE_TIME:
                try:
                    # EAFP: the file may already be gone (another cleanup
                    # pass or external removal); an exists() pre-check
                    # would be racy, so just tolerate a missing file.
                    os.remove(filename)
                except FileNotFoundError:
                    pass
                # Expired entries are forgotten even if the file was
                # already missing (matches the original bookkeeping).
                expired.append(filename)
        for filename in expired:
            with lock:
                # pop() with default: the entry may have been removed
                # concurrently; a bare `del` could raise KeyError.
                tmp_time.pop(filename, None)
        time.sleep(THREAD_WAKEUP_TIME)
|
|
|
|
|
# Top-level UI definition: builds the Gradio app (`demo`) and wires all events.
with gr.Blocks(css=CSS, title="Document Analysis System", theme=paddle_theme) as demo:
    # Holds the latest inference results dict so later callbacks
    # (display update, ZIP export) can reuse them without re-running.
    results_state = gr.State()

    # --- Header: logo banner plus a latency notice for Hugging Face users. ---
    with gr.Column(elem_classes=["logo-container"]):
        gr.HTML(f'<img src="{LOGO_BASE64}" class="logo-img">')
        gr.Markdown(
            """
            Since our inference server is deployed in mainland China, cross-border
            network transmission may be slow, which could result in a suboptimal experience on Hugging Face.
            We recommend visiting the [PaddlePaddle AI Studio Community](https://aistudio.baidu.com/community/app/518494/webUI?source=appCenter) to try the demo for a smoother experience.
            """,
            elem_classes=["tight-spacing-as"],
            visible=True,
        )

    with gr.Row():
        # --- Left column: upload widgets and pipeline options. ---
        with gr.Column(scale=4):
            # Hidden native file picker; triggered indirectly via pdf_btn's JS.
            file_input = gr.File(
                label="Upload document",
                file_types=[".pdf", ".jpg", ".jpeg", ".png"],
                type="filepath",
                visible=False,
            )
            # Mirrors the currently selected file path (kept hidden).
            file_select = gr.Textbox(label="Select File Path", visible=False)
            # Hidden image input; used as the target for the examples gallery.
            image_input = gr.Image(
                label="Image",
                sources="upload",
                type="filepath",
                visible=False,
                interactive=True,
                placeholder="Click to upload image...",
            )
            # Visible upload button; its click handler (wired below) opens
            # the hidden file input through custom JS.
            pdf_btn = gr.Button(
                "Click to upload file...",
                variant="primary",
                icon="icon/upload.png",
                elem_classes=["square-pdf-btn"],
            )
            # Clicking an example clears any previously selected file.
            examples_image = gr.Examples(
                fn=clear_file_selection_examples,
                inputs=image_input,
                outputs=[file_input, file_select],
                examples_per_page=11,
                examples=EXAMPLES_TEST,
                run_on_click=True,
            )

            file_input.change(
                fn=on_file_change, inputs=file_input, outputs=[file_select]
            )
            concatenate_pages_md = gr.Markdown(
                "### Merge pages", elem_id="concatenate_pages_md"
            )
            concatenate_pages_rd = gr.Radio(
                choices=[("yes", True), ("no", False)],
                value=False,
                interactive=True,
                show_label=False,
                elem_id="concatenate_pages_rd",
            )
            # --- Advanced options: one dropdown selects which option panel
            # (Column) is visible; toggling is handled by toggle_sections. ---
            with gr.Column():
                section_choice = gr.Dropdown(
                    choices=[
                        "Subpipeline Options",
                        "Module Options",
                        "Layout region detection Options",
                        "Text detection Options",
                        "Seal text recognition Options",
                        "Table recognition Options",
                    ],
                    value="Subpipeline Options",
                    label="Advance Options",
                    show_label=True,
                    container=True,
                    scale=0,
                    elem_classes=["tight-spacing"],
                )
                # Panel: per-module on/off switches.
                with gr.Column(visible=False) as Module_Options:
                    use_chart_recognition_md = gr.Markdown(
                        "### Using the chart parsing module",
                        elem_id="use_chart_recognition_md",
                    )
                    use_chart_recognition_rd = gr.Radio(
                        choices=[("yes", True), ("no", False)],
                        value=False,
                        interactive=True,
                        show_label=False,
                        elem_id="use_chart_recognition_rd",
                    )
                    use_region_detection_md = gr.Markdown(
                        "### Using the layout region detection module",
                        elem_id="use_region_detection_md",
                    )
                    use_region_detection_rd = gr.Radio(
                        choices=[("yes", True), ("no", False)],
                        value=True,
                        interactive=True,
                        show_label=False,
                        elem_id="use_region_detection_rd",
                    )
                    use_doc_orientation_classify_md = gr.Markdown(
                        "### Using the document image orientation classification module",
                        elem_id="use_doc_orientation_classify_md",
                    )
                    use_doc_orientation_classify_rd = gr.Radio(
                        choices=[("yes", True), ("no", False)],
                        value=False,
                        interactive=True,
                        show_label=False,
                        elem_id="use_doc_orientation_classify_rd",
                    )
                    use_doc_unwarping_md = gr.Markdown(
                        "### Using the document unwarping module",
                        elem_id="use_doc_unwarping_md",
                    )
                    use_doc_unwarping_rd = gr.Radio(
                        choices=[("yes", True), ("no", False)],
                        value=False,
                        interactive=True,
                        show_label=False,
                        elem_id="use_doc_unwarping_rd",
                    )
                    use_textline_orientation_md = gr.Markdown(
                        "### Using the text line orientation classification module",
                        elem_id="use_textline_orientation_md",
                    )
                    use_textline_orientation_rd = gr.Radio(
                        choices=[("yes", True), ("no", False)],
                        value=False,
                        interactive=True,
                        show_label=False,
                        elem_id="use_textline_orientation_rd",
                    )
                # Panel: subpipeline on/off switches (default-visible panel).
                with gr.Column(visible=True) as Subpipeline_Options:
                    use_seal_recognition_md = gr.Markdown(
                        "### Using the seal text recognition subpipeline",
                        elem_id="use_seal_recognition_md",
                    )
                    use_seal_recognition_rd = gr.Radio(
                        choices=[("yes", True), ("no", False)],
                        value=True,
                        interactive=True,
                        show_label=False,
                        elem_id="use_seal_recognition_rd",
                    )
                    use_formula_recognition_md = gr.Markdown(
                        "### Using the formula recognition subpipeline",
                        elem_id="use_formula_recognition_md",
                    )
                    use_formula_recognition_rd = gr.Radio(
                        choices=[("yes", True), ("no", False)],
                        value=True,
                        interactive=True,
                        show_label=False,
                        elem_id="use_formula_recognition_rd",
                    )
                    use_table_recognition_md = gr.Markdown(
                        "### Using the table recognition subpipeline",
                        elem_id="use_table_recognition_md",
                    )
                    use_table_recognition_rd = gr.Radio(
                        choices=[("yes", True), ("no", False)],
                        value=True,
                        interactive=True,
                        show_label=False,
                        elem_id="use_table_recognition_rd",
                    )
                # Panel: layout region detection thresholds.
                with gr.Column(visible=False) as Layout_region_detection_Options:
                    layout_threshold_md = gr.Markdown(
                        "### Score threshold of layout region detection model",
                        elem_id="layout_threshold_md",
                    )
                    layout_threshold_nb = gr.Number(
                        value=0.5,
                        step=0.1,
                        minimum=0,
                        maximum=1,
                        interactive=True,
                        show_label=False,
                        elem_id="layout_threshold_nb",
                    )
                    layout_nms_md = gr.Markdown(
                        "### NMS post-processing of layout region detection",
                        elem_id="layout_nms_md",
                    )
                    layout_nms_rd = gr.Radio(
                        choices=[("yes", True), ("no", False)],
                        value=True,
                        interactive=True,
                        show_label=False,
                        elem_id="layout_nms_rd",
                    )
                    layout_unclip_ratio_md = gr.Markdown(
                        "### Layout region detection expansion coefficient",
                        elem_id="layout_unclip_ratio_md",
                    )
                    layout_unclip_ratio_nb = gr.Number(
                        value=1.0,
                        step=0.1,
                        minimum=0,
                        maximum=10.0,
                        interactive=True,
                        show_label=False,
                        elem_id="layout_unclip_ratio_nb",
                    )
                # Panel: text detection / recognition thresholds.
                with gr.Column(visible=False) as Text_detection_Options:
                    text_det_limit_type_md = gr.Markdown(
                        "### Image side length restriction type for text detection",
                        elem_id="text_det_limit_type_md",
                    )
                    text_det_limit_type_rd = gr.Radio(
                        choices=[("Short side", "min"), ("Long side", "max")],
                        value="min",
                        interactive=True,
                        show_label=False,
                        elem_id="text_det_limit_type_rd",
                    )
                    text_det_limit_side_len_md = gr.Markdown(
                        "### Image side length limitation for text detection",
                        elem_id="text_det_limit_side_len_md",
                    )
                    text_det_limit_side_len_nb = gr.Number(
                        value=736,
                        step=1,
                        minimum=0,
                        maximum=10000,
                        interactive=True,
                        show_label=False,
                        elem_id="text_det_limit_side_len_nb",
                    )
                    text_det_thresh_md = gr.Markdown(
                        "### Text detection pixel threshold",
                        elem_id="text_det_thresh_md",
                    )
                    text_det_thresh_nb = gr.Number(
                        value=0.30,
                        step=0.01,
                        minimum=0.00,
                        maximum=1.00,
                        interactive=True,
                        show_label=False,
                        elem_id="text_det_thresh_nb",
                    )
                    text_det_box_thresh_md = gr.Markdown(
                        "### Text detection box threshold",
                        elem_id="text_det_box_thresh_md",
                    )
                    text_det_box_thresh_nb = gr.Number(
                        value=0.60,
                        step=0.01,
                        minimum=0.00,
                        maximum=1.00,
                        interactive=True,
                        show_label=False,
                        elem_id="text_det_box_thresh_nb",
                    )
                    text_det_unclip_ratio_md = gr.Markdown(
                        "### Text detection unclip ratio",
                        elem_id="text_det_unclip_ratio_md",
                    )
                    text_det_unclip_ratio_nb = gr.Number(
                        value=1.5,
                        step=0.1,
                        minimum=0,
                        maximum=10.0,
                        interactive=True,
                        show_label=False,
                        elem_id="text_det_unclip_ratio_nb",
                    )

                    text_rec_score_thresh_md = gr.Markdown(
                        "### Text recognition score threshold",
                        elem_id="text_rec_score_thresh_md",
                    )
                    text_rec_score_thresh_nb = gr.Number(
                        value=0.00,
                        step=0.01,
                        minimum=0,
                        maximum=1.00,
                        interactive=True,
                        show_label=False,
                        elem_id="text_rec_score_thresh_nb",
                    )

                # Panel: seal text detection / recognition thresholds.
                with gr.Column(visible=False) as Seal_text_recognition_Options:
                    seal_det_limit_type_md = gr.Markdown(
                        "### Image side length restriction type for seal text detection",
                        elem_id="seal_det_limit_type_md",
                    )
                    seal_det_limit_type_rd = gr.Radio(
                        choices=[("Short side", "min"), ("Long side", "max")],
                        value="min",
                        interactive=True,
                        show_label=False,
                        elem_id="seal_det_limit_type_rd",
                    )
                    seal_det_limit_side_len_md = gr.Markdown(
                        "### Image side length limitation for seal text detection",
                        elem_id="seal_det_limit_side_len_md",
                    )
                    seal_det_limit_side_len_nb = gr.Number(
                        value=736,
                        step=1,
                        minimum=0,
                        maximum=10000,
                        interactive=True,
                        show_label=False,
                        elem_id="seal_det_limit_side_len_nb",
                    )
                    seal_det_thresh_md = gr.Markdown(
                        "### Pixel threshold for seal text detection",
                        elem_id="seal_det_thresh_md",
                    )
                    seal_det_thresh_nb = gr.Number(
                        value=0.20,
                        step=0.01,
                        minimum=0.00,
                        maximum=1.00,
                        interactive=True,
                        show_label=False,
                        elem_id="seal_det_thresh_nb",
                    )
                    seal_det_box_thresh_md = gr.Markdown(
                        "### Seal text detection box threshold",
                        elem_id="seal_det_box_thresh_md",
                    )
                    seal_det_box_thresh_nb = gr.Number(
                        value=0.60,
                        step=0.01,
                        minimum=0.00,
                        maximum=1.00,
                        interactive=True,
                        show_label=False,
                        elem_id="seal_det_box_thresh_nb",
                    )
                    seal_det_unclip_ratio_md = gr.Markdown(
                        "### Seal text detection unclip ratio",
                        elem_id="seal_det_unclip_ratio_md",
                    )
                    seal_det_unclip_ratio_nb = gr.Number(
                        value=0.5,
                        step=0.1,
                        minimum=0,
                        maximum=10.0,
                        interactive=True,
                        show_label=False,
                        elem_id="seal_det_unclip_ratio_nb",
                    )
                    seal_rec_score_thresh_md = gr.Markdown(
                        "### Seal text detection threshold",
                        elem_id="seal_rec_score_thresh_md",
                    )
                    seal_rec_score_thresh_nb = gr.Number(
                        value=0.00,
                        step=0.01,
                        minimum=0,
                        maximum=1.00,
                        interactive=True,
                        show_label=False,
                        elem_id="seal_rec_score_thresh_nb",
                    )
                # Panel: table recognition behavior switches.
                with gr.Column(visible=False) as Table_recognition_Options:
                    use_ocr_results_with_table_cells_md = gr.Markdown(
                        "### Cell OCR mode",
                        elem_id="use_ocr_results_with_table_cells_md",
                    )
                    use_ocr_results_with_table_cells_rd = gr.Radio(
                        choices=[("yes", True), ("no", False)],
                        value=True,
                        interactive=True,
                        show_label=False,
                        elem_id="use_ocr_results_with_table_cells_rd",
                    )
                    use_e2e_wired_table_rec_model_md = gr.Markdown(
                        "### Wired Table End-to-End Prediction model",
                        elem_id="use_e2e_wired_table_rec_model_md",
                    )
                    use_e2e_wired_table_rec_model_rd = gr.Radio(
                        choices=[("yes", True), ("no", False)],
                        value=False,
                        interactive=True,
                        show_label=False,
                        elem_id="use_e2e_wired_table_rec_model_rd",
                    )
                    use_e2e_wireless_table_rec_model_md = gr.Markdown(
                        "### Wireless Table End-to-End Prediction model",
                        elem_id="use_e2e_wireless_table_rec_model_md",
                    )
                    use_e2e_wireless_table_rec_model_rd = gr.Radio(
                        choices=[("yes", True), ("no", False)],
                        value=False,
                        interactive=True,
                        show_label=False,
                        elem_id="use_e2e_wireless_table_rec_model_rd",
                    )
                    use_wired_table_cells_trans_to_html_md = gr.Markdown(
                        "### Wired table to HTML mode",
                        elem_id="use_wired_table_cells_trans_to_html_md",
                    )
                    use_wired_table_cells_trans_to_html_rd = gr.Radio(
                        choices=[("yes", True), ("no", False)],
                        value=False,
                        interactive=True,
                        show_label=False,
                        elem_id="use_wired_table_cells_trans_to_html_rd",
                    )
                    use_wireless_table_cells_trans_to_html_md = gr.Markdown(
                        "### Wireless table to HTML mode",
                        elem_id="use_wireless_table_cells_trans_to_html_md",
                    )
                    use_wireless_table_cells_trans_to_html_rd = gr.Radio(
                        choices=[("yes", True), ("no", False)],
                        value=False,
                        interactive=True,
                        show_label=False,
                        elem_id="use_wireless_table_cells_trans_to_html_rd",
                    )
                    use_table_orientation_classify_md = gr.Markdown(
                        "### Using table orientation classify module",
                        elem_id="use_table_orientation_classify_md",
                    )
                    use_table_orientation_classify_rd = gr.Radio(
                        choices=[("yes", True), ("no", False)],
                        value=True,
                        interactive=True,
                        show_label=False,
                        elem_id="use_table_orientation_classify_rd",
                    )
            with gr.Row():
                process_btn = gr.Button(
                    "🚀 Parse Document", elem_id="analyze-btn", variant="primary"
                )

            gr.Markdown(
                f"""
                1. Only the first {MAX_NUM_PAGES} pages will be processed.
                2. Some formulas might not display correctly because of renderer limitations or syntax errors.
                """
            )

        # --- Right column: results area (spinners, tabs, downloads). ---
        with gr.Column(scale=7):
            gr.Markdown("### Results", elem_classes="custom-markdown")
            # Shown while the pipeline is running.
            loading_spinner = gr.Column(
                visible=False, elem_classes=["loader-container"]
            )
            with loading_spinner:
                gr.HTML(
                    """
                    <div class="loader"></div>
                    <p>Processing, please wait...</p>
                    """
                )
            # Shown before the first run: static user guide.
            prepare_spinner = gr.Column(
                visible=True, elem_classes=["loader-container-prepare"]
            )
            with prepare_spinner:
                gr.HTML(
                    """
                    <div style="
                        max-width: 100%;
                        max-height: 100%;
                        margin: 24px 0 0 12px;
                        padding: 24px 32px;
                        border: 2px solid #A8C1E7;
                        border-radius: 12px;
                        background: #f8faff;
                        box-shadow: 0 2px 8px rgba(100,150,200,0.08);
                        font-size: 18px;
                    ">
                    <b>🚀 User Guide</b><br>
                    <b>Step 1:</b> Upload Your File<br>
                    Supported formats: JPG, PNG, PDF, JPEG<br>
                    <b>Step 2:</b> Click Analyze Document Button<br>
                    System will process automatically<br>
                    <b>Step 3:</b> Wait for Results<br>
                    Results will be displayed after processing<br>
                    <b>Step 4:</b> Download results zip<br>
                    Results zip will be displayed after processing<br><br>
                    <b>⚠️Special Attention:</b> For safety reasons, please make sure that the uploaded files do not contain personal information.
                    </div>
                    """
                )
            download_file = gr.File(visible=False, label="Download File")
            # Per-page component lists (one entry per page, up to MAX_NUM_PAGES);
            # gallery_list collects the per-tab thumbnail galleries.
            markdown_display_list = []
            layout_ordering_images = []
            layout_det_res_images = []
            overall_ocr_res_images = []
            output_json_list = []
            gallery_list = []
            with gr.Tabs(visible=False) as tabs:
                # Tab 1: parsed markdown per page, with a thumbnail gallery.
                with gr.Tab("Layout Parsing"):
                    with gr.Row():
                        with gr.Column(scale=2, min_width=1):
                            gallery_markdown = gr.Gallery(
                                show_label=False,
                                allow_preview=False,
                                preview=False,
                                columns=1,
                                min_width=10,
                                object_fit="contain",
                                visible=True,
                            )
                            gallery_list.append(gallery_markdown)
                        with gr.Column(scale=10):
                            for i in range(MAX_NUM_PAGES):
                                markdown_display_list.append(
                                    gr.Markdown(
                                        visible=False,
                                        container=True,
                                        show_copy_button=True,
                                        latex_delimiters=[
                                            {
                                                "left": "$$",
                                                "right": "$$",
                                                "display": True,
                                            },
                                            {
                                                "left": "$",
                                                "right": "$",
                                                "display": False,
                                            },
                                        ],
                                        elem_classes=["image-container"],
                                    )
                                )
                # Tab 2: reading-order visualization images.
                with gr.Tab("Reading Order"):
                    with gr.Row():
                        with gr.Column(scale=2, min_width=1):
                            gallery_layout_order = gr.Gallery(
                                show_label=False,
                                allow_preview=False,
                                preview=False,
                                columns=1,
                                min_width=10,
                                object_fit="contain",
                            )
                            gallery_list.append(gallery_layout_order)
                        with gr.Column(scale=10):
                            for i in range(MAX_NUM_PAGES):
                                layout_ordering_images.append(
                                    gr.Image(
                                        label=f"Layout Ordering Image {i}",
                                        show_label=True,
                                        visible=False,
                                        container=True,
                                    )
                                )
                # Tab 3: layout region detection result images.
                with gr.Tab("Layout Region Detection"):
                    with gr.Row():
                        with gr.Column(scale=2, min_width=1):
                            gallery_layout_det = gr.Gallery(
                                show_label=False,
                                allow_preview=False,
                                preview=False,
                                columns=1,
                                min_width=10,
                                object_fit="contain",
                            )
                            gallery_list.append(gallery_layout_det)
                        with gr.Column(scale=10):
                            for i in range(MAX_NUM_PAGES):
                                layout_det_res_images.append(
                                    gr.Image(
                                        label=f"Layout Detection Image {i}",
                                        show_label=True,
                                        visible=False,
                                    )
                                )
                # Tab 4: OCR overlay images.
                with gr.Tab("OCR"):
                    with gr.Row():
                        with gr.Column(scale=2, min_width=1):
                            gallery_ocr_det = gr.Gallery(
                                show_label=False,
                                allow_preview=False,
                                preview=False,
                                columns=1,
                                min_width=10,
                                object_fit="contain",
                            )
                            gallery_list.append(gallery_ocr_det)
                        with gr.Column(scale=10):
                            for i in range(MAX_NUM_PAGES):
                                overall_ocr_res_images.append(
                                    gr.Image(
                                        label=f"OCR Image {i}",
                                        show_label=True,
                                        visible=False,
                                    )
                                )
                # Tab 5: raw JSON output, with CSS tweaks for the JSON viewer.
                with gr.Tab("JSON"):
                    with gr.Row():
                        with gr.Column(scale=2, min_width=1):
                            gallery_json = gr.Gallery(
                                show_label=False,
                                allow_preview=False,
                                preview=False,
                                columns=1,
                                min_width=10,
                                object_fit="contain",
                            )
                            gallery_list.append(gallery_json)
                        with gr.Column(scale=10):
                            gr.HTML(
                                """
                                <style>
                                .line.svelte-19ir0ev svg {
                                    width: 30px !important;
                                    height: 30px !important;
                                    min-width: 30px !important;
                                    min-height: 30px !important;
                                    padding: 0 !important;
                                    font-size: 18px !important;
                                }
                                .line.svelte-19ir0ev span:contains('Object(') {
                                    font-size: 12px;
                                }
                                </style>
                                """
                            )
                            output_json_list.append(
                                gr.JSON(
                                    visible=False,
                                )
                            )
            # Revealed only after a successful run (see process_btn chain).
            download_all_btn = gr.Button(
                "📦 Download Full Results (ZIP)",
                elem_id="unzip-btn",
                variant="primary",
                visible=False,
            )

    # --- Footer navigation bar. ---
    with gr.Column(elem_classes=["nav-bar"]):
        gr.HTML(
            """
            <div class="nav-links">
                <a href="https://github.com/PaddlePaddle/PaddleOCR" class="nav-link" target="_blank">GitHub</a>
            </div>
            """
        )

    # --- Event wiring. ---
    # Switch which advanced-options panel is visible.
    section_choice.change(
        fn=toggle_sections,
        inputs=section_choice,
        outputs=[
            Module_Options,
            Subpipeline_Options,
            Layout_region_detection_Options,
            Text_detection_Options,
            Seal_text_recognition_Options,
            Table_recognition_Options,
        ],
    )
    # Upload button: clear current selection, then open the hidden browser
    # file picker via JS (resetting value so re-selecting the same file works).
    pdf_btn.click(
        fn=clear_file_selection, inputs=[], outputs=[file_input, file_select]
    ).then(
        None,
        [],
        [],
        js="""
        () => {
            const fileInput = document.querySelector('input[type="file"]');
            fileInput.value = '';
            fileInput.click();
        }
        """,
    )
    # Main pipeline: show spinner -> run inference -> hide spinner ->
    # refresh all result components -> reveal the ZIP download button.
    process_btn.click(
        toggle_spinner,
        outputs=[
            loading_spinner,
            prepare_spinner,
            download_file,
            tabs,
            download_all_btn,
        ],
    ).then(
        process_file,
        inputs=[
            file_input,
            image_input,
            use_formula_recognition_rd,
            use_chart_recognition_rd,
            use_doc_orientation_classify_rd,
            use_doc_unwarping_rd,
            use_textline_orientation_rd,
            use_region_detection_rd,
            use_seal_recognition_rd,
            use_table_recognition_rd,
            layout_threshold_nb,
            layout_nms_rd,
            layout_unclip_ratio_nb,
            text_det_limit_type_rd,
            text_det_limit_side_len_nb,
            text_det_thresh_nb,
            text_det_box_thresh_nb,
            text_det_unclip_ratio_nb,
            text_rec_score_thresh_nb,
            seal_det_limit_type_rd,
            seal_det_limit_side_len_nb,
            seal_det_thresh_nb,
            seal_det_box_thresh_nb,
            seal_det_unclip_ratio_nb,
            seal_rec_score_thresh_nb,
            use_ocr_results_with_table_cells_rd,
            use_e2e_wired_table_rec_model_rd,
            use_e2e_wireless_table_rec_model_rd,
            use_wired_table_cells_trans_to_html_rd,
            use_wireless_table_cells_trans_to_html_rd,
            use_table_orientation_classify_rd,
        ],
        outputs=[results_state],
    ).then(
        hide_spinner, outputs=[loading_spinner, tabs]
    ).then(
        update_display,
        inputs=[results_state, concatenate_pages_rd],
        # Output order must match the tuple returned by update_display.
        outputs=layout_ordering_images
        + layout_det_res_images
        + overall_ocr_res_images
        + output_json_list
        + markdown_display_list
        + gallery_list,
    ).success(
        lambda: gr.update(visible=True), outputs=download_all_btn
    )

    # Thumbnail click -> show the corresponding page in each tab.
    gallery_markdown.select(
        update_markdown,
        inputs=concatenate_pages_rd,
        outputs=markdown_display_list,
    )
    gallery_layout_order.select(update_image, outputs=layout_ordering_images)
    gallery_layout_det.select(update_image, outputs=layout_det_res_images)
    gallery_ocr_det.select(update_image, outputs=overall_ocr_res_images)

    # Build the full-results ZIP and reveal the file component on success.
    download_all_btn.click(
        export_full_results, inputs=[results_state], outputs=[download_file]
    ).success(lambda: gr.File(visible=True), outputs=[download_file])

    # On page load, install a custom mouse-following tooltip for every
    # element id listed in TOOLTIP_RADIO (pure front-end, no Python handler).
    demo.load(
        fn=lambda: None,
        inputs=[],
        outputs=[],
        js=f"""
        () => {{
            const tooltipTexts = {TOOLTIP_RADIO};
            let tooltip = document.getElementById("custom-tooltip");
            if (!tooltip) {{
                tooltip = document.createElement("div");
                tooltip.id = "custom-tooltip";
                tooltip.style.position = "fixed";
                tooltip.style.background = "rgba(0, 0, 0, 0.75)";
                tooltip.style.color = "white";
                tooltip.style.padding = "6px 10px";
                tooltip.style.borderRadius = "4px";
                tooltip.style.fontSize = "13px";
                tooltip.style.maxWidth = "300px";
                tooltip.style.zIndex = "10000";
                tooltip.style.pointerEvents = "none";
                tooltip.style.transition = "opacity 0.2s";
                tooltip.style.opacity = "0";
                tooltip.style.whiteSpace = "normal";
                document.body.appendChild(tooltip);
            }}
            Object.keys(tooltipTexts).forEach(id => {{
                const elem = document.getElementById(id);
                if (!elem) return;
                function showTooltip(e) {{
                    tooltip.style.opacity = "1";
                    tooltip.innerText = tooltipTexts[id];
                    let x = e.clientX + 10;
                    let y = e.clientY + 10;
                    if (x + tooltip.offsetWidth > window.innerWidth) {{
                        x = e.clientX - tooltip.offsetWidth - 10;
                    }}
                    if (y + tooltip.offsetHeight > window.innerHeight) {{
                        y = e.clientY - tooltip.offsetHeight - 10;
                    }}
                    tooltip.style.left = x + "px";
                    tooltip.style.top = y + "px";
                }}
                function hideTooltip() {{
                    tooltip.style.opacity = "0";
                }}
                elem.addEventListener("mousemove", showTooltip);
                elem.addEventListener("mouseleave", hideTooltip);
            }});
        }}
        """,
    )
|
|
|
|
|
if __name__ == "__main__":
    # Daemon thread: delete_file_periodically loops forever, so a
    # non-daemon thread would keep the interpreter alive after the
    # Gradio server shuts down (e.g. on Ctrl-C).
    t = threading.Thread(target=delete_file_periodically, daemon=True)
    t.start()
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
    )
|
|