# MediQuery deployment-file generator (Hugging Face Spaces setup script)
import os
import json
from datetime import datetime
# Pinned minimum versions of every package MediQuery needs on
# Hugging Face Spaces (deep learning, serving, and image tooling).
requirements = [
    "torch>=1.10.0",
    "torchvision>=0.11.0",
    "transformers>=4.18.0",
    "gradio>=3.0.0",
    "fastapi>=0.75.0",
    "uvicorn>=0.17.0",
    "pandas>=1.3.0",
    "numpy>=1.20.0",
    "Pillow>=9.0.0",
    "faiss-cpu>=1.7.0",
    "opencv-python-headless>=4.5.0",
    "matplotlib>=3.5.0",
    "tqdm>=4.62.0",
    "python-multipart>=0.0.5",
]

# One write call: join the pins with newlines and keep the trailing
# newline so the file ends like the original per-line version did.
with open("requirements.txt", "w") as req_file:
    req_file.write("\n".join(requirements) + "\n")
print("Created requirements.txt file for Hugging Face Spaces deployment")
# User-facing README for the Space: feature list, usage steps,
# REST endpoints, and model credits. The string is written verbatim.
readme = """# MediQuery - AI Multimodal Medical Assistant
MediQuery is an AI-powered medical assistant that analyzes chest X-rays and answers medical queries using advanced deep learning models.
## Features
- **X-ray Analysis**: Upload a chest X-ray image for AI-powered analysis
- **Medical Query**: Ask questions about medical conditions, findings, and interpretations
- **Visual Explanations**: View attention maps highlighting important areas in X-rays
- **Comprehensive Reports**: Get detailed findings and impressions in structured format
## How to Use
### Image Analysis
1. Upload a chest X-ray image
2. Click "Analyze X-ray"
3. View the analysis results and attention map
### Text Query
1. Enter your medical question
2. Click "Submit Query"
3. Read the AI-generated response
## API Documentation
This Space also provides a REST API for integration with other applications:
- `POST /api/query`: Process a text query
- `POST /api/analyze-image`: Analyze an X-ray image
- `GET /api/health`: Check API health
## About
MediQuery combines state-of-the-art image models (DenseNet/CheXNet) with medical language models (BioBERT) and a fine-tuned FLAN-T5 generator to provide accurate and informative medical assistance.
Created by Tanishk Soni
"""

# Fresh "w" mode deliberately replaces any stale README from a prior run.
with open("README.md", "w") as readme_file:
    readme_file.write(readme)
print("Created README.md file for Hugging Face Spaces")
# Standard Python .gitignore plus a section that keeps heavyweight
# model/index artifacts (*.pt, *.pth, *.bin, *.faiss) out of git —
# those are fetched at runtime by download_models.py instead.
gitignore = """# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# Logs
logs/
*.log
# Temporary files
/tmp/
.DS_Store
# Virtual Environment
venv/
ENV/
# IDE
.idea/
.vscode/
*.swp
*.swo
# Model files (add these manually)
*.pt
*.pth
*.bin
*.faiss
"""

with open(".gitignore", "w") as ignore_file:
    ignore_file.write(gitignore)
print("Created .gitignore file")
# Bootstrap script emitted as download_models.py: fetches DenseNet121,
# BioBERT, and FLAN-T5 weights, then builds a tiny 10-report knowledge
# base with random-vector FAISS indices as placeholders.
# NOTE(review): the embedded script is written verbatim — it is not
# executed here, so its heavy downloads only happen when the user runs it.
download_script = """import os
import torch
from torchvision import models
from transformers import AutoTokenizer, AutoModel, T5ForConditionalGeneration, T5Tokenizer
import faiss
import numpy as np
import pandas as pd
# Create directories
os.makedirs("models/flan-t5-finetuned", exist_ok=True)
os.makedirs("knowledge_base", exist_ok=True)
print("Downloading model weights...")
# Download image model (DenseNet121)
image_model = models.densenet121(pretrained=True)
torch.save(image_model.state_dict(), "models/densenet121.pt")
print("Downloaded DenseNet121 weights")
# Download text model (BioBERT)
tokenizer = AutoTokenizer.from_pretrained("dmis-lab/biobert-v1.1")
model = AutoModel.from_pretrained("dmis-lab/biobert-v1.1")
tokenizer.save_pretrained("models/biobert")
model.save_pretrained("models/biobert")
print("Downloaded BioBERT weights")
# Download generation model (FLAN-T5)
gen_tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-base")
gen_model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-base")
gen_tokenizer.save_pretrained("models/flan-t5-finetuned")
gen_model.save_pretrained("models/flan-t5-finetuned")
print("Downloaded FLAN-T5 weights")
# Create a minimal knowledge base
print("Creating minimal knowledge base...")
text_data = pd.DataFrame({
'combined_text': [
"The chest X-ray shows clear lung fields with no evidence of consolidation, effusion, or pneumothorax. The heart size is normal. No acute cardiopulmonary abnormality.",
"Bilateral patchy airspace opacities consistent with multifocal pneumonia. No pleural effusion or pneumothorax. Heart size is normal.",
"Cardiomegaly with pulmonary vascular congestion and bilateral pleural effusions, consistent with congestive heart failure. No pneumothorax or pneumonia.",
"Right upper lobe opacity concerning for pneumonia. No pleural effusion or pneumothorax. Heart size is normal.",
"Left lower lobe atelectasis. No pneumothorax or pleural effusion. Heart size is normal.",
"Bilateral pleural effusions with bibasilar atelectasis. Cardiomegaly present. Findings consistent with heart failure.",
"Right pneumothorax with partial lung collapse. No pleural effusion. Heart size is normal.",
"Endotracheal tube, central venous catheter, and nasogastric tube in place. No pneumothorax or pleural effusion.",
"Hyperinflated lungs with flattened diaphragms, consistent with COPD. No acute infiltrate or effusion.",
"Multiple rib fractures on the right side. No pneumothorax or hemothorax. Lung fields are clear."
],
'valid_index': list(range(10))
})
text_data.to_csv("knowledge_base/text_data.csv", index=False)
# Create dummy FAISS indices
text_dim = 768
text_embeddings = np.random.rand(len(text_data), text_dim).astype('float32')
image_dim = 1024
image_embeddings = np.random.rand(len(text_data), image_dim).astype('float32')
# Create FAISS indices
text_index = faiss.IndexFlatL2(text_dim)
text_index.add(text_embeddings)
faiss.write_index(text_index, "knowledge_base/text_index.faiss")
image_index = faiss.IndexFlatL2(image_dim)
image_index.add(image_embeddings)
faiss.write_index(image_index, "knowledge_base/image_index.faiss")
print("Created minimal knowledge base")
print("Setup complete!")
"""

with open("download_models.py", "w") as script_file:
    script_file.write(download_script)
print("Created download_models.py script")
# Hugging Face Space configuration metadata.
# BUG FIX: in the original, this dict was built but never written
# anywhere (dead code — the "Write space config to file" step only
# appended a tags line). It is now serialized into the metadata block
# appended to README.md so the Space actually receives the settings.
space_config = {
    "title": "MediQuery - AI Medical Assistant",
    # Original literal was mojibake ("π©Ί"); restored to the stethoscope
    # emoji — presumably the intended value, confirm against the Space UI.
    "emoji": "🩺",
    "colorFrom": "blue",
    "colorTo": "indigo",
    "sdk": "gradio",
    "sdk_version": "3.36.1",
    "python_version": "3.10",
    "app_file": "app.py",
    "pinned": False,
}

# Append the config plus tags as a YAML-style metadata block.
# Strings are emitted as-is; non-strings (e.g. pinned=False) go through
# json.dumps so they render in YAML-compatible form ("false").
with open("README.md", "a") as f:
    f.write("\n\n---\n")
    for key, value in space_config.items():
        rendered = value if isinstance(value, str) else json.dumps(value)
        f.write(f"{key}: {rendered}\n")
    f.write("tags: [healthcare, medical, xray, radiology, multimodal]\n")
print("Updated README.md with tags for Hugging Face Spaces")
print("All deployment files created successfully!")