Ronith55 committed
Commit edb8721 · verified · 1 Parent(s): 2798fcf

Update app.py

Files changed (1): app.py (+13 -5)
app.py CHANGED
@@ -1,8 +1,9 @@
  import torch
- from transformers import pipeline, AutoModel, AutoProcessor
+ import os
  from PIL import Image
+ from transformers import pipeline, AutoModel
 
- # ✅ Use pipeline for image-text-to-text
+ # ✅ Load the model using pipeline
  pipe = pipeline("image-text-to-text", model="deepseek-ai/deepseek-vl2-small", trust_remote_code=True)
 
  # ✅ Load model directly (alternative approach)
@@ -10,13 +11,20 @@ model = AutoModel.from_pretrained("deepseek-ai/deepseek-vl2-small", trust_remote
 
  # ✅ Function to process image and text
  def predict(image_path, text_prompt):
-     image = Image.open("C:\Users\70088531\Downloads\sample_img2.JPG").convert("RGB")
+     # Ensure correct path format for Windows/Linux
+     image_path = image_path.replace("\\", "/")
+     image = Image.open(image_path).convert("RGB")
+
      messages = [{"role": "user", "content": text_prompt}]
      result = pipe(image, messages)
      return result
 
  # ✅ Example usage
  if __name__ == "__main__":
-     test_image = "test.jpg" # Replace with an actual image file
+     # Replace this with the correct image path
+     test_image = "sample_img2.JPG" # Ensure this image exists in the same folder
      prompt = "Describe this image."
-     print("Generated Response:", predict(test_image, prompt))
+
+     # Run prediction
+     output = predict(test_image, prompt)
+     print("Generated Response:", output)