# MediQuery-AI / setup_deployment.py
import os
import json
from datetime import datetime
# Create a requirements.txt file for Hugging Face Spaces deployment
requirements = [
    "torch>=1.10.0",
    "torchvision>=0.11.0",
    "transformers>=4.18.0",
    "sentencepiece>=0.1.95",  # required by the slow T5Tokenizer used in download_models.py
    "gradio>=3.0.0",
    "fastapi>=0.75.0",
    "uvicorn>=0.17.0",
    "pandas>=1.3.0",
    "numpy>=1.20.0",
    "Pillow>=9.0.0",
    "faiss-cpu>=1.7.0",
    "opencv-python-headless>=4.5.0",
    "matplotlib>=3.5.0",
    "tqdm>=4.62.0",
    "python-multipart>=0.0.5"
]
# Write requirements to file
with open("requirements.txt", "w") as f:
    for req in requirements:
        f.write(f"{req}\n")
print("Created requirements.txt file for Hugging Face Spaces deployment")
# Create a README.md file for the Hugging Face Space
readme = """# MediQuery - AI Multimodal Medical Assistant
MediQuery is an AI-powered medical assistant that analyzes chest X-rays and answers medical queries using advanced deep learning models.
## Features
- **X-ray Analysis**: Upload a chest X-ray image for AI-powered analysis
- **Medical Query**: Ask questions about medical conditions, findings, and interpretations
- **Visual Explanations**: View attention maps highlighting important areas in X-rays
- **Comprehensive Reports**: Get detailed findings and impressions in structured format
## How to Use
### Image Analysis
1. Upload a chest X-ray image
2. Click "Analyze X-ray"
3. View the analysis results and attention map
### Text Query
1. Enter your medical question
2. Click "Submit Query"
3. Read the AI-generated response
## API Documentation
This Space also provides a REST API for integration with other applications:
- `POST /api/query`: Process a text query
- `POST /api/analyze-image`: Analyze an X-ray image
- `GET /api/health`: Check API health
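
### Example Request

The snippet below is illustrative only; the exact request and response fields depend on how `app.py` exposes the API, so treat the payload shape as an assumption:

```python
import requests  # client-side dependency, not part of this Space's requirements.txt

response = requests.post(
    "https://<your-space-url>/api/query",
    json={"query": "What does cardiomegaly look like on a chest X-ray?"},
)
print(response.json())
```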

## About

MediQuery combines state-of-the-art image models (DenseNet/CheXNet) with medical language models (BioBERT) and a fine-tuned FLAN-T5 generator to provide accurate and informative medical assistance.

Created by Tanishk Soni
"""
# Write README to file
with open("README.md", "w") as f:
    f.write(readme)
print("Created README.md file for Hugging Face Spaces")
# Create a .gitignore file
gitignore = """# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# Logs
logs/
*.log
# Temporary files
/tmp/
.DS_Store
# Virtual Environment
venv/
ENV/
# IDE
.idea/
.vscode/
*.swp
*.swo
# Model files (add these manually)
*.pt
*.pth
*.bin
*.faiss
"""
# Write .gitignore to file
with open(".gitignore", "w") as f:
    f.write(gitignore)
print("Created .gitignore file")
# Create a simple script to download model weights
download_script = """import os
import torch
from torchvision import models
from transformers import AutoTokenizer, AutoModel, T5ForConditionalGeneration, T5Tokenizer
import faiss
import numpy as np
import pandas as pd
# Create directories
os.makedirs("models/flan-t5-finetuned", exist_ok=True)
os.makedirs("knowledge_base", exist_ok=True)
print("Downloading model weights...")
# Download image model (DenseNet121)
image_model = models.densenet121(pretrained=True)
torch.save(image_model.state_dict(), "models/densenet121.pt")
print("Downloaded DenseNet121 weights")
# Download text model (BioBERT)
tokenizer = AutoTokenizer.from_pretrained("dmis-lab/biobert-v1.1")
model = AutoModel.from_pretrained("dmis-lab/biobert-v1.1")
tokenizer.save_pretrained("models/biobert")
model.save_pretrained("models/biobert")
print("Downloaded BioBERT weights")
# Download generation model (FLAN-T5)
gen_tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-base")
gen_model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-base")
gen_tokenizer.save_pretrained("models/flan-t5-finetuned")
gen_model.save_pretrained("models/flan-t5-finetuned")
print("Downloaded FLAN-T5 weights")
# Create a minimal knowledge base
print("Creating minimal knowledge base...")
text_data = pd.DataFrame({
    'combined_text': [
        "The chest X-ray shows clear lung fields with no evidence of consolidation, effusion, or pneumothorax. The heart size is normal. No acute cardiopulmonary abnormality.",
        "Bilateral patchy airspace opacities consistent with multifocal pneumonia. No pleural effusion or pneumothorax. Heart size is normal.",
        "Cardiomegaly with pulmonary vascular congestion and bilateral pleural effusions, consistent with congestive heart failure. No pneumothorax or pneumonia.",
        "Right upper lobe opacity concerning for pneumonia. No pleural effusion or pneumothorax. Heart size is normal.",
        "Left lower lobe atelectasis. No pneumothorax or pleural effusion. Heart size is normal.",
        "Bilateral pleural effusions with bibasilar atelectasis. Cardiomegaly present. Findings consistent with heart failure.",
        "Right pneumothorax with partial lung collapse. No pleural effusion. Heart size is normal.",
        "Endotracheal tube, central venous catheter, and nasogastric tube in place. No pneumothorax or pleural effusion.",
        "Hyperinflated lungs with flattened diaphragms, consistent with COPD. No acute infiltrate or effusion.",
        "Multiple rib fractures on the right side. No pneumothorax or hemothorax. Lung fields are clear."
    ],
    'valid_index': list(range(10))
})
text_data.to_csv("knowledge_base/text_data.csv", index=False)
# Create dummy FAISS indices
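# The embeddings below are random placeholders so the app can start; regenerate them from the
# knowledge-base texts (BioBERT, 768-dim) and X-ray images (DenseNet121 features, 1024-dim)
# before relying on retrieval results.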
text_dim = 768
text_embeddings = np.random.rand(len(text_data), text_dim).astype('float32')
image_dim = 1024
image_embeddings = np.random.rand(len(text_data), image_dim).astype('float32')
# Build the FAISS indices and write them to disk
text_index = faiss.IndexFlatL2(text_dim)
text_index.add(text_embeddings)
faiss.write_index(text_index, "knowledge_base/text_index.faiss")
image_index = faiss.IndexFlatL2(image_dim)
image_index.add(image_embeddings)
faiss.write_index(image_index, "knowledge_base/image_index.faiss")
print("Created minimal knowledge base")
print("Setup complete!")
"""
# Write download script to file
with open("download_models.py", "w") as f:
    f.write(download_script)
print("Created download_models.py script")
# Create a Hugging Face Space configuration file
space_config = {
    "title": "MediQuery - AI Medical Assistant",
    "emoji": "🩺",
    "colorFrom": "blue",
    "colorTo": "indigo",
    "sdk": "gradio",
    "sdk_version": "3.36.1",
    "python_version": "3.10",
    "app_file": "app.py",
    "pinned": False
}
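# Hugging Face Spaces reads this configuration only from YAML front matter at the very top of
# README.md, so the block below regenerates README.md with that front matter prepended, e.g.:
#   ---
#   title: "MediQuery - AI Medical Assistant"
#   sdk: "gradio"
#   pinned: false
#   ...
#   ---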
# Rewrite README.md with the Space configuration (plus content tags) as YAML front matter
front_matter = [f"{key}: {json.dumps(value, ensure_ascii=False)}" for key, value in space_config.items()]
front_matter.append("tags: [healthcare, medical, xray, radiology, multimodal]")
with open("README.md", "w") as f:
    f.write("---\n" + "\n".join(front_matter) + "\n---\n\n" + readme)
print("Rewrote README.md with Hugging Face Spaces configuration and tags")
print("All deployment files created successfully!")