bravewiki committed
Commit 76223e9 · verified · Parent: 2000e9e

Update app.py

Files changed (1):
app.py  +1 −5
app.py CHANGED
@@ -1,6 +1,6 @@
 import streamlit as st
 import torch
-from transformers import VisionEncoderDecoderModel, AutoTokenizer, pipeline
+from transformers import AutoTokenizer, pipeline
 from pdf2image import convert_from_path
 import pytesseract
 from PIL import Image
@@ -9,12 +9,9 @@ import io
 from typing import List, Tuple
 
 # Initialize models and tokenizer
-vision_model_name = "nlpconnect/vit-gpt2-image-captioning"
 text_model_name = "peteparker456/medical_diagnosis_llama2"
 
 # Load the vision and text models
-vision_model = VisionEncoderDecoderModel.from_pretrained(vision_model_name)
-vision_tokenizer = AutoTokenizer.from_pretrained(vision_model_name)
 text_model = pipeline("text-generation", model=text_model_name)
 
 pytesseract.pytesseract.tesseract_cmd = r'/usr/bin/tesseract'  # Path to Tesseract executable
@@ -43,7 +40,6 @@ def extract_text_from_pdf(pdf_path: str) -> str:
 def generate_insights(text: str) -> List[Tuple[str, str]]:
     """Get interpretations and recommendations from the text."""
     # Create a dummy input for the text model
-    inputs = vision_tokenizer.encode(text, return_tensors="pt", max_length=1000, truncation=True)
     output_text = text_model(text, max_length=1000)[0]["generated_text"]
 
     return [
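After this change only the Llama-2 text-generation pipeline is loaded; the unused vision-captioning model, its tokenizer, and the dummy encode call are removed. A minimal sketch of the remaining flow, assuming transformers is installed and the Hub model name from the diff resolves; the summarize_report helper below is illustrative and not part of app.py:

# Minimal sketch of the post-change flow: only the text pipeline remains.
# "peteparker456/medical_diagnosis_llama2" is the model name from the diff;
# summarize_report is a hypothetical helper, not code from app.py.
from transformers import pipeline

text_model = pipeline("text-generation", model="peteparker456/medical_diagnosis_llama2")

def summarize_report(report_text: str) -> str:
    # The pipeline returns a list of dicts; "generated_text" holds the output,
    # mirroring text_model(text, max_length=1000)[0]["generated_text"] in app.py.
    return text_model(report_text, max_length=1000)[0]["generated_text"]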