# File: app.py
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel, PeftConfig
import torch
# Load PEFT adapter configuration
peft_config = PeftConfig.from_pretrained("unica/CLiMA")
# BitsAndBytes 4-bit config
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",              # NormalFloat4, the 4-bit type recommended by QLoRA for LLM weights
    bnb_4bit_compute_dtype=torch.bfloat16,  # use bfloat16 on Ampere+ GPUs, float16 otherwise
    bnb_4bit_use_double_quant=True          # also quantize the quantization constants to save extra memory
)
# Load the quantized base model
base_model = AutoModelForCausalLM.from_pretrained(
    peft_config.base_model_name_or_path,
    quantization_config=bnb_config,
    device_map="auto"
)
# Load adapter weights
model = PeftModel.from_pretrained(base_model, "unica/CLiMA")
# Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(peft_config.base_model_name_or_path)
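# Note: some causal-LM tokenizers ship without a pad token. Single-prompt inference
# as done below does not need one, but if batched, padded inputs are ever added,
# a common fallback is to reuse the EOS token:
#     if tokenizer.pad_token is None:
#         tokenizer.pad_token = tokenizer.eos_token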
# Format prompt
def format_prompt(user_input, entity1, entity2):
    return f"Identify causal relations in the following clinical narrative:\n\n{user_input}\n\nEntity 1: {entity1}\nEntity 2: {entity2}\n\nCausal relations:"
# Prediction function
def generate_relations(text, entity1, entity2):
    prompt = format_prompt(text, entity1, entity2)
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=256, do_sample=False)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response[len(prompt):].strip()  # remove the echoed prompt from the decoded output
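# A slightly more robust variant (sketch of the same idea) would decode only the
# newly generated tokens, avoiding any mismatch between the original prompt string
# and its decoded form:
#     new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
#     return tokenizer.decode(new_tokens, skip_special_tokens=True).strip()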
# Gradio UI
demo = gr.Interface(
    fn=generate_relations,
    inputs=[
        gr.Textbox(lines=10, label="Clinical Note or Drug Review Text"),
        gr.Textbox(label="Entity 1 (e.g., Drug)"),
        gr.Textbox(label="Entity 2 (e.g., Condition or Symptom)")
    ],
    outputs=gr.Textbox(label="Extracted Causal Relations"),
    title="Causal Relation Extractor with MedLlama",
    description="Paste a clinical note or drug review and specify two target entities. The app uses a fine-tuned LLM adapter to extract causal relations between them, e.g. drug-condition or drug-symptom links.",
    examples=[
["Odynophagia: Was presumed due to mucositis from recent chemotherapy.", "chemotherapy", "mucositis"],
["patient's wife noticed erythema on patient's face. On [**3-27**]the visiting nurse [**First Name (Titles) 8706**][**Last Name (Titles)11282**]of a rash on his arms as well. The patient was noted to be febrile and was admitted to the [**Company 191**] Firm. In the EW, patient's Dilantin was discontinued and he was given Tegretol instead.", "Dilantin", "erythema on patient's face"],
["i had a urinary tract infection so bad that when i pee it smells but when i started taking ciprofloxacin it worked it’s a good medicine for a urinary tract infections.","ciprofloxacin","urinary tract infection"],
["when i first started using ziana, i only had acne in between my eyebrows, chin, and the nose area. my acne worsened while using it and then it got better. but after about 4 months of using it, it became ineffective. so i now have acne between my eyebrows, chin, cheeks, forehead, and the nose area. its great at first but after a while it made my face even worse than before i used the product.","ziana","acne"]
]
)
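# On a hosted Space, launch() with no arguments is sufficient; when running locally,
# demo.launch(share=True) would additionally expose a temporary public link.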
demo.launch()