Anuji committed (verified)
Commit 7e00771 · 1 Parent(s): 3c6abc9

changed app.py

Files changed (1)
  1. app.py +18 -5
app.py CHANGED
@@ -1,13 +1,26 @@
-# Step 2: Verify GPU
+# Step 1: Clone the DeepSeek-VL2 repository (Only for Colab environments)
+import os
+if not os.path.exists("/content/DeepSeek-VL2"):
+    !git clone https://github.com/deepseek-ai/DeepSeek-VL2
+    %cd DeepSeek-VL2
+else:
+    print("Repository already cloned.")
+
+# Install dependencies if not already installed (you can add more if necessary)
+!pip install -r /content/DeepSeek-VL2/requirements.txt
+
+# Step 2: Ensure DeepSeek-VL2 is in the Python path
+import sys
+sys.path.append('/content/DeepSeek-VL2') # Add the DeepSeek-VL2 repository to the path
+
+# Step 3: Verify GPU (Optional)
 import torch
 print("CUDA Available:", torch.cuda.is_available())
 print("Device:", torch.cuda.current_device() if torch.cuda.is_available() else "CPU")
 print("Device Name:", torch.cuda.get_device_name(0) if torch.cuda.is_available() else "N/A")
 
-# Step 3: Modified app.py for Colab with debugging
-import os
+# Step 4: Rest of your app.py code
 import gradio as gr
-import torch
 from PIL import Image
 from deepseek_vl2.serve.inference import load_model, deepseek_generate, convert_conversation_to_prompts
 from deepseek_vl2.models.conversation import SeparatorStyle
@@ -125,4 +138,4 @@ with gr.Blocks() as demo:
     submit_btn = gr.Button("Extract Text")
     submit_btn.click(upload_and_process, inputs=image_input, outputs=[output_text, history_state])
 
-demo.launch(share=True, debug=True) # Added debug=True for more Gradio logs
+demo.launch(share=True, debug=True) # Added debug=True for more Gradio logs
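
Note on the added setup block: the !git clone, !pip install, and %cd lines introduced in this commit are IPython/Colab cell magics, so the committed app.py only runs when pasted into a notebook cell; executed as a plain script (python app.py), those lines are a SyntaxError. Below is a minimal sketch of a notebook-free equivalent, assuming the same /content/DeepSeek-VL2 path used in the diff; REPO_URL and REPO_DIR are illustrative names, not part of the commit.

# Sketch only: plain-Python equivalent of the Colab-specific setup added in this commit.
# Assumes the /content/DeepSeek-VL2 target path from the diff; adjust for other environments.
import os
import subprocess
import sys

REPO_URL = "https://github.com/deepseek-ai/DeepSeek-VL2"
REPO_DIR = "/content/DeepSeek-VL2"  # path taken from the diff; any writable directory works

# Step 1: clone the repository if it is not already present
if not os.path.exists(REPO_DIR):
    subprocess.run(["git", "clone", REPO_URL, REPO_DIR], check=True)
else:
    print("Repository already cloned.")

# Install dependencies with the interpreter that will later import deepseek_vl2
subprocess.run(
    [sys.executable, "-m", "pip", "install", "-r", os.path.join(REPO_DIR, "requirements.txt")],
    check=True,
)

# Step 2: make the deepseek_vl2 package importable
sys.path.append(REPO_DIR)

Using sys.executable -m pip ties the install to the same interpreter that performs the later imports, which avoids the mismatch that !pip can cause when several Python environments are present.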