from paddleocr import PaddleOCR
from gliner import GLiNER
from PIL import Image
import gradio as gr
import numpy as np
import logging
import tempfile
import pandas as pd
import re
import traceback
import zxingcpp
# --------------------------
# Configuration & Constants
# --------------------------
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Load the GLiNER model once at import time so it is reused across requests.
# NOTE: the checkpoint name below is an assumption; swap in the model this Space actually uses.
gliner_model = GLiNER.from_pretrained("urchade/gliner_base")
COUNTRY_CODES = {
'SAUDI': {'code': '+966', 'pattern': r'^(\+9665\d{8}|05\d{8})$'},
'UAE': {'code': '+971', 'pattern': r'^(\+9715\d{8}|05\d{8})$'}
}
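# Note: a local '05XXXXXXXX' number satisfies both patterns above; because dicts preserve
# insertion order, process_phone_number() resolves that ambiguity in favour of SAUDI.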
VALIDATION_PATTERNS = {
    'email': re.compile(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b', re.IGNORECASE),
'website': re.compile(r'(?:https?://)?(?:www\.)?([A-Za-z0-9-]+\.[A-Za-z]{2,})'),
'name': re.compile(r'^[A-Z][a-z]+(?:\s+[A-Z][a-z]+){1,2}$')
}
# --------------------------
# Core Processing Functions
# --------------------------
def process_phone_number(raw_number: str) -> str | None:
    """Validate and standardize phone numbers for supported countries; return None if unsupported."""
cleaned = re.sub(r'[^\d+]', '', raw_number)
for country, config in COUNTRY_CODES.items():
if re.match(config['pattern'], cleaned):
if cleaned.startswith('0'):
return f"{config['code']}{cleaned[1:]}"
if cleaned.startswith('5'):
return f"{config['code']}{cleaned}"
return cleaned
return None
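# Illustrative calls (numbers are made up):
#   process_phone_number("050-123-4567")  -> "+966501234567"  (local form; SAUDI wins the tie)
#   process_phone_number("+971501234567") -> "+971501234567"  (already international)
#   process_phone_number("12345")         -> None              (no supported pattern)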
def extract_contact_info(text: str) -> dict:
"""Extract and validate all contact information from text"""
contacts = {
'phones': set(),
'emails': set(),
'websites': set()
}
# Phone number extraction
for match in re.finditer(r'(\+?\d{10,13}|05\d{8})', text):
if processed := process_phone_number(match.group()):
contacts['phones'].add(processed)
# Email validation
contacts['emails'].update(
email.lower() for email in VALIDATION_PATTERNS['email'].findall(text)
)
# Website normalization
for match in VALIDATION_PATTERNS['website'].finditer(text):
domain = match.group(1).lower()
if '.' in domain:
contacts['websites'].add(f"www.{domain.split('/')[0]}")
    # Return every key (possibly empty) so downstream lookups never raise KeyError
    return {k: sorted(v) for k, v in contacts.items()}
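# Illustrative output shape (values are made up):
#   extract_contact_info("Call 0501234567 or mail jane@acme.com, see www.acme.com")
#   -> {'phones': ['+966501234567'], 'emails': ['jane@acme.com'], 'websites': ['www.acme.com']}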
def process_entities(entities: list, ocr_text: list) -> dict:
"""Process GLiNER entities with validation and fallbacks"""
result = {
'name': None,
'company': None,
'title': None,
'address': None
}
# Entity extraction
for entity in entities:
label = entity['label'].lower()
text = entity['text'].strip()
if label == 'person name' and VALIDATION_PATTERNS['name'].match(text):
result['name'] = text.title()
elif label == 'company name':
result['company'] = text
elif label == 'job title':
result['title'] = text.title()
elif label == 'address':
result['address'] = text
# Name fallback from OCR text
if not result['name']:
for text in ocr_text:
if VALIDATION_PATTERNS['name'].match(text):
result['name'] = text.title()
break
return result
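# Illustrative call (entity dicts follow GLiNER's predict_entities() output, which also
# carries 'start', 'end' and 'score' fields that are ignored here):
#   process_entities([{'label': 'person name', 'text': 'jane doe'}], ['Jane Doe', 'Acme Corp'])
#   -> {'name': 'Jane Doe', 'company': None, 'title': None, 'address': None}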
# --------------------------
# Main Processing Pipeline
# --------------------------
def process_business_card(img: Image.Image, confidence: float) -> tuple:
"""Full processing pipeline for business card images"""
try:
        # Initialize OCR (re-created on every call; hoist to module level to avoid reload cost).
        # use_angle_cls enables the angle classifier that cls=True below relies on.
        ocr_engine = PaddleOCR(lang='en', use_gpu=False, use_angle_cls=True)
        # OCR Processing (PaddleOCR returns [None] when no text is detected)
        ocr_result = ocr_engine.ocr(np.array(img), cls=True)
        ocr_text = [line[1][0] for line in (ocr_result[0] or [])]
full_text = " ".join(ocr_text)
# Entity Recognition
labels = ["person name", "company name", "job title",
"phone number", "email address", "address",
"website"]
entities = gliner_model.predict_entities(full_text, labels, threshold=confidence)
# Data Extraction
contacts = extract_contact_info(full_text)
entity_data = process_entities(entities, ocr_text)
qr_data = zxingcpp.read_barcodes(np.array(img.convert('RGB')))
# Compile Final Results
results = {
'Person Name': entity_data['name'],
'Company Name': entity_data['company'] or (
contacts['emails'][0].split('@')[1].split('.')[0].title()
if contacts['emails'] else None
),
'Job Title': entity_data['title'],
'Phone Numbers': contacts['phones'],
'Email Addresses': contacts['emails'],
'Address': entity_data['address'] or next(
(t for t in ocr_text if any(kw in t.lower()
for kw in {'street', 'ave', 'road'})), None
),
'Website': contacts['websites'][0] if contacts['websites'] else None,
'QR Code': qr_data[0].text if qr_data else None
}
        # Generate CSV Output (index=False keeps the pandas row index out of the file)
        with tempfile.NamedTemporaryFile(suffix='.csv', delete=False, mode='w') as f:
            pd.DataFrame([results]).to_csv(f, index=False)
            csv_path = f.name
return full_text, results, csv_path, ""
except Exception as e:
logger.error(f"Processing Error: {traceback.format_exc()}")
return "", {}, None, f"Error: {str(e)}"
# --------------------------
# Gradio Interface
# --------------------------
interface = gr.Interface(
fn=process_business_card,
inputs=[
gr.Image(type='pil', label='Upload Business Card'),
gr.Slider(0.1, 1.0, value=0.4, label='Confidence Threshold')
],
outputs=[
gr.Textbox(label='OCR Result'),
gr.JSON(label='Structured Data'),
gr.File(label='Download CSV'),
gr.Textbox(label='Error Log')
],
title='Enterprise Business Card Parser',
description='Multi-country support with comprehensive validation'
)
if __name__ == '__main__':
    interface.launch()