Create app.py
app.py
ADDED
# app.py
import os

import gradio as gr
import requests
import torch
import torchxrayvision as xrv
from monai.networks.nets import DenseNet121
from transformers import pipeline

# Configuration
DEEPSEEK_API_URL = "https://api.deepseek.com/v1/chat/completions"
DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")  # Set as a secret in the Hugging Face Space settings

DISCLAIMER = """
<div style="color: red; border: 2px solid red; padding: 15px; margin: 10px;">
⚠️ WARNING: This is a prototype demonstration only. NOT ACTUAL MEDICAL ADVICE.
DO NOT USE FOR REAL HEALTH DECISIONS. CONSULT LICENSED PROFESSIONALS.
</div>
"""


class MedicalAssistant:
    def __init__(self):
        # Medical imaging models
        self.medical_models = self._init_imaging_models()

        # Clinical text processing.
        # NOTE: obi/deid_bert_i2b2 is a de-identification model; it tags protected
        # health information (names, dates, IDs), not medications or dosages, so the
        # lists returned by parse_prescription() will usually be empty.
        self.prescription_parser = pipeline(
            "token-classification",
            model="obi/deid_bert_i2b2",
            tokenizer="obi/deid_bert_i2b2"
        )

        # Safety systems
        self.safety_filter = pipeline(
            "text-classification",
            model="Hate-speech-CNERG/dehatebert-mono-english"
        )

    def _init_imaging_models(self):
        """Initialize medical imaging models."""
        return {
            "xray": xrv.models.DenseNet(weights="densenet121-res224-all"),
            "ct": DenseNet121(spatial_dims=3, in_channels=1, out_channels=14),
            "histo": torch.hub.load("pytorch/vision", "resnet50", pretrained=True)
        }

    def query_deepseek(self, prompt: str):
        """Query the DeepSeek chat-completions API."""
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {DEEPSEEK_API_KEY}"
        }

        payload = {
            # NOTE: placeholder model name; the public DeepSeek API exposes
            # general-purpose models such as "deepseek-chat", not a medical model.
            "model": "deepseek-medical-1.0",
            "messages": [{
                "role": "user",
                "content": f"MEDICAL PROMPT: {prompt}\nRespond with the latest research-supported information. Cite sources."
            }],
            "temperature": 0.2,
            "max_tokens": 500
        }

        try:
            response = requests.post(DEEPSEEK_API_URL, json=payload, headers=headers, timeout=60)
            response.raise_for_status()
            return response.json()["choices"][0]["message"]["content"]
        except Exception as e:
            return f"API Error: {str(e)}"

    def analyze_image(self, image_path: str, modality: str):
        """Medical image analysis."""
        try:
            img = self._preprocess_image(image_path, modality)

            if modality == "xray":
                output = self.medical_models["xray"](img)
                return xrv.datasets.default_pathologies[torch.argmax(output).item()]
            elif modality == "ct":
                output = self.medical_models["ct"](img)
                return "CT analysis placeholder"
            elif modality == "histo":
                output = self.medical_models["histo"](img)
                return "Histopathology analysis placeholder"
            return f"Unsupported modality: {modality}"
        except Exception as e:
            return f"Image analysis error: {str(e)}"

    def parse_prescription(self, text: str):
        """Clinical text parsing."""
        entities = self.prescription_parser(text)
        return {
            "medications": [ent for ent in entities if ent["entity"] == "MEDICATION"],
            "dosage": [ent for ent in entities if ent["entity"] == "DOSAGE"]
        }

    def generate_response(self, query: str, context: dict):
        """Generate a safe, research-backed response."""
        # Construct an enhanced prompt from the query plus any image/prescription context
        research_prompt = f"""
        Medical Query: {query}
        Context:
        - Image Findings: {context.get('image_analysis', 'N/A')}
        - Prescription Data: {context.get('prescription', 'N/A')}

        Requirements:
        1. Provide evidence-based medical information
        2. Cite recent research (post-2020 when possible)
        3. Include safety considerations
        4. Note confidence level
        """

        # Get the DeepSeek research response
        raw_response = self.query_deepseek(research_prompt)

        # Apply safety filters
        if self._is_unsafe(raw_response):
            return "I cannot provide advice on this matter. Please consult a healthcare professional."

        return self._add_disclaimer(raw_response)

    def _is_unsafe(self, text: str):
        """Content safety check."""
        return self.safety_filter(text)[0]["label"] == "HATE"

    def _add_disclaimer(self, text: str):
        """Append the legal disclaimer to a response."""
        return f"{text}\n\n---\n⚠️ This information is for research purposes only. Not medical advice."

    def _preprocess_image(self, image_path: str, modality: str):
        """Image preprocessing placeholder: ignores the uploaded file and returns a
        random tensor in the (batch, channel, height, width) shape the X-ray model expects."""
        return torch.rand(1, 1, 224, 224)


# Initialize system
assistant = MedicalAssistant()


def process_input(query, image, prescription):
    context = {}

    if image is not None:
        context["image_analysis"] = assistant.analyze_image(image, "xray")

    if prescription:
        context["prescription"] = assistant.parse_prescription(prescription)

    return assistant.generate_response(query, context)


# Gradio interface
interface = gr.Interface(
    fn=process_input,
    inputs=[
        gr.Textbox(label="Medical Query", placeholder="Enter your medical question..."),
        gr.Image(label="Medical Imaging", type="filepath"),
        gr.Textbox(label="Prescription Text")
    ],
    outputs=gr.Textbox(label="Research-Backed Response"),
    title="AI Medical Research Assistant",
    description=DISCLAIMER,
    allow_flagging="never"
)
interface.launch()

Make a requirements file and a README for this code so it can run as a Hugging Face Space.
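A minimal requirements.txt sketch follows, listing only the packages app.py imports directly, plus torchvision, which torchxrayvision and the torch.hub ResNet-50 load typically rely on; the source gives no version pins, so none are added here.

requirements.txt:
gradio
torch
torchvision
torchxrayvision
monai
transformers
requests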
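And a README sketch using the standard Hugging Face Spaces YAML front matter (title, emoji, colorFrom, colorTo, sdk, app_file, pinned); the emoji and colors are illustrative placeholders, not values from the source.

README.md:
---
title: AI Medical Research Assistant
emoji: 🩺
colorFrom: red
colorTo: gray
sdk: gradio
app_file: app.py
pinned: false
---

# AI Medical Research Assistant

⚠️ Prototype demonstration only. NOT medical advice. Do not use for real health decisions; consult licensed professionals.

A Gradio demo that combines medical imaging models (torchxrayvision, MONAI), clinical text pipelines (transformers), and the DeepSeek chat API to draft research-style answers to medical queries.

Setup: add a DEEPSEEK_API_KEY secret in the Space settings. To run locally, install the dependencies from requirements.txt, export DEEPSEEK_API_KEY, and run `python app.py`.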