Updated app.py
app.py CHANGED
@@ -1,25 +1,7 @@
-# Step 1: Clone the DeepSeek-VL2 repository (Only for Colab environments)
 import os
-
-!git clone https://github.com/deepseek-ai/DeepSeek-VL2
-%cd DeepSeek-VL2
-else:
-    print("Repository already cloned.")
-
-# Install dependencies if not already installed (you can add more if necessary)
-!pip install -r /content/DeepSeek-VL2/requirements.txt
-
-# Step 2: Ensure DeepSeek-VL2 is in the Python path
+import subprocess
 import sys
-sys.path.append('/content/DeepSeek-VL2')  # Add the DeepSeek-VL2 repository to the path
-
-# Step 3: Verify GPU (Optional)
 import torch
-print("CUDA Available:", torch.cuda.is_available())
-print("Device:", torch.cuda.current_device() if torch.cuda.is_available() else "CPU")
-print("Device Name:", torch.cuda.get_device_name(0) if torch.cuda.is_available() else "N/A")
-
-# Step 4: Rest of your app.py code
 import gradio as gr
 from PIL import Image
 from deepseek_vl2.serve.inference import load_model, deepseek_generate, convert_conversation_to_prompts
@@ -27,6 +9,23 @@ from deepseek_vl2.models.conversation import SeparatorStyle
 from deepseek_vl2.serve.app_modules.utils import configure_logger, strip_stop_words, pil_to_base64
 from google.colab import files
 
+# Step 1: Clone the DeepSeek-VL2 repository if not already present
+repo_dir = "/content/DeepSeek-VL2"
+if not os.path.exists(repo_dir):
+    subprocess.run(["git", "clone", "https://github.com/deepseek-ai/DeepSeek-VL2"], check=True)
+    sys.path.append(repo_dir)  # Add the DeepSeek-VL2 repository to the Python path
+else:
+    print("Repository already cloned.")
+
+# Step 2: Install dependencies if not already installed
+subprocess.run([sys.executable, "-m", "pip", "install", "-r", "/content/DeepSeek-VL2/requirements.txt"])
+
+# Step 3: Verify GPU (Optional)
+print("CUDA Available:", torch.cuda.is_available())
+print("Device:", torch.cuda.current_device() if torch.cuda.is_available() else "CPU")
+print("Device Name:", torch.cuda.get_device_name(0) if torch.cuda.is_available() else "N/A")
+
+# Step 4: Define your model and prediction functions
 logger = configure_logger()
 
 MODELS = ["deepseek-ai/deepseek-vl2-tiny"]
@@ -139,3 +138,4 @@ with gr.Blocks() as demo:
     submit_btn.click(upload_and_process, inputs=image_input, outputs=[output_text, history_state])
 
 demo.launch(share=True, debug=True)  # Added debug=True for more Gradio logs
+
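Two caveats on this revision, illustrated by the editorial sketch below (not part of the commit): sys.path.append(repo_dir) only runs inside the if not os.path.exists(repo_dir): branch, so a restarted runtime that already has the clone never gets the repository onto its import path; and the deepseek_vl2 imports sit above the Step 1 bootstrap, so on a fresh runtime they execute before the repository exists. The pip install call also omits check=True, so a failed install passes silently. A hardened bootstrap, keeping the same hard-coded Colab paths the commit uses, might look like this:

# Editorial sketch, not part of the committed app.py: run this bootstrap
# before any deepseek_vl2 import so the clone and sys.path entry exist first.
import os
import subprocess
import sys

repo_dir = "/content/DeepSeek-VL2"  # same path the commit hard-codes
if not os.path.exists(repo_dir):
    # Clone into an explicit target directory and fail loudly on error.
    subprocess.run(
        ["git", "clone", "https://github.com/deepseek-ai/DeepSeek-VL2", repo_dir],
        check=True,
    )
# Append unconditionally so re-runs with an existing clone still resolve imports.
if repo_dir not in sys.path:
    sys.path.append(repo_dir)
# check=True here as well, so a failed dependency install is not ignored.
subprocess.run(
    [sys.executable, "-m", "pip", "install", "-r",
     os.path.join(repo_dir, "requirements.txt")],
    check=True,
)

Running pip via sys.executable -m pip, as the commit already does, is the main win over the removed !pip magic: it ties the install to the interpreter actually running the app. The sketch only adds the failure handling and the unconditional path setup.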