TheEighthDay committed
Commit d5cd46b · verified · 1 Parent(s): 83ba4e4

Upload simple_inference.py

Files changed (1)
  1. simple_inference.py +233 -0
simple_inference.py ADDED
@@ -0,0 +1,233 @@
import argparse
import json
import os
import torch
import base64
from io import BytesIO
from PIL import Image

# Conditional imports, depending on the selected inference engine
try:
    from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration
    transformers_available = True
except ImportError:
    transformers_available = False
    print("Warning: the transformers libraries are not installed; the transformers engine cannot be used")

try:
    from vllm import LLM, SamplingParams
    vllm_available = True
except ImportError:
    vllm_available = False
    print("Warning: the vllm libraries are not installed; the vllm engine cannot be used")

# Code merged in from qwen_vl_utils
def process_vision_info(messages):
    """Process the image and video entries in multimodal messages.

    Args:
        messages: list of messages that may contain images or videos

    Returns:
        images_data: processed image data
        videos_data: processed video data
    """
    images_list, videos_list = [], []
    for message in messages:
        content = message.get("content", None)
        if isinstance(content, str):
            # plain-text message, nothing to process
            continue
        elif isinstance(content, list):
            # mixed content, may contain images or videos
            for item in content:
                if not isinstance(item, dict):
                    continue

                # handle images
                if item.get("type") == "image" and "image" in item:
                    image = item["image"]
                    if isinstance(image, str):
                        # image path or URL, try to load it
                        try:
                            image = Image.open(image)
                        except Exception as e:
                            print(f"Failed to load image: {e}")
                            continue

                    # keep the PIL image object: vLLM's multi_modal_data expects
                    # image objects rather than base64-encoded strings
                    if isinstance(image, Image.Image):
                        images_list.append(image)

                # handle videos (if ever needed)
                elif item.get("type") == "video" and "video" in item:
                    # video is not supported yet
                    pass

    return images_list or None, videos_list or None

def predict_location(
    image_path,
    model_name="Qwen/Qwen2.5-VL-7B-Instruct",
    inference_engine="transformers"
):
    """
    Run location-recognition inference on a single image.

    Args:
        image_path: path to the image file
        model_name: model name or path
        inference_engine: inference engine, "vllm" or "transformers"

    Returns:
        the predicted result text
    """
    # check that the image file exists
    if not os.path.exists(image_path):
        return f"Error: image file does not exist: {image_path}"

    # load the image
    try:
        image = Image.open(image_path)
        print(f"Loaded image: {image_path}")
    except Exception as e:
        return f"Error: failed to load image: {str(e)}"

    # load the processor (AutoProcessor comes from transformers, which both engines need)
    if not transformers_available:
        return "Error: the transformers library is unavailable, but it is required to load the processor"
    print(f"Loading processor: {model_name}")
    processor = AutoProcessor.from_pretrained(model_name, padding_side='left')

    # build the prompt - simplified version, without SFT or CoT
    question_text = "In which country and within which first-level administrative region of that country was this picture taken?Please answer in the format of <answer>$country,administrative_area_level_1$</answer>?"
    system_message = "You are a helpful assistant good at solving problems with step-by-step reasoning. You should first thinks about the reasoning process in the mind and then provides the user with the answer. The reasoning process and answer are enclosed within <think> </think> and <answer> </answer> tags."

    # assemble the simplified chat messages
    prompt_messages = [
        {
            "role": "system",
            "content": [
                {"type": "text", "text": system_message}
            ]
        },
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image},
                {"type": "text", "text": question_text}
            ]
        }
    ]

    # run inference with the selected engine
    if inference_engine == "vllm":
        if not vllm_available:
            return "Error: the vLLM library is unavailable; install vllm or choose the transformers engine"

        # run inference with vLLM
        print(f"Loading model with vLLM: {model_name}")
        llm = LLM(
            model=model_name,
            limit_mm_per_prompt={"image": 10, "video": 10},
            dtype="auto",
            gpu_memory_utilization=0.95,
        )

        # sampling parameters
        sampling_params = SamplingParams(
            temperature=0.7,
            top_p=0.8,
            repetition_penalty=1.05,
            max_tokens=2048,
            stop_token_ids=[],
        )

        # render the chat messages into vLLM's prompt format
        prompt = processor.apply_chat_template(
            prompt_messages,
            tokenize=False,
            add_generation_prompt=True,
        )

        # extract the image data
        image_inputs, video_inputs = process_vision_info(prompt_messages)

        mm_data = {}
        if image_inputs is not None:
            mm_data["image"] = image_inputs

        # build the vLLM input
        llm_input = {
            "prompt": prompt,
            "multi_modal_data": mm_data,
        }

        # generate the answer
        outputs = llm.generate([llm_input], sampling_params=sampling_params)
        response = outputs[0].outputs[0].text

    else:  # transformers
        if not transformers_available:
            return "Error: the transformers libraries are unavailable; install the required packages"

        # load the model with transformers
        print(f"Loading model with transformers: {model_name}")
        model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
            model_name, torch_dtype="auto", device_map="auto"
        )

        # prepare the input text
        text = processor.apply_chat_template(
            prompt_messages, tokenize=False, add_generation_prompt=True
        )

        # tokenize the text and preprocess the image
        inputs = processor(
            text=text,
            images=prompt_messages[1]['content'][0]['image'],
            return_tensors="pt",
        )

        inputs = inputs.to(model.device)

        # generate the answer
        with torch.no_grad():
            generated_ids = model.generate(**inputs, max_new_tokens=2048)

        # strip the prompt tokens and decode only the newly generated part
        generated_ids_trimmed = generated_ids[0][len(inputs['input_ids'][0]):]
        response = processor.decode(generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False)

    # free the GPU cache
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

    print("\n=== Inference result ===")
    print(response)
    print("=================\n")

    return response

if __name__ == "__main__":
    # command-line arguments
    parser = argparse.ArgumentParser(description='Run location-recognition inference on a single image')
    parser.add_argument('--image_path', type=str, required=True,
                        help='path to the image file')
    parser.add_argument('--model_name', type=str, default="Qwen/Qwen2.5-VL-7B-Instruct",
                        help='model name or path')
    parser.add_argument('--inference_engine', type=str, default="transformers", choices=["vllm", "transformers"],
                        help='inference engine: vllm or transformers')

    args = parser.parse_args()

    # run inference on the single image
    result = predict_location(
        image_path=args.image_path,
        model_name=args.model_name,
        inference_engine=args.inference_engine
    )

    print(f"Final prediction: {result}")
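
For reference, a minimal usage sketch of the uploaded script, not part of the commit itself: the module name simple_inference, the sample image path sample.jpg, and the regex-based answer extraction are assumptions for illustration.

# Minimal usage sketch (assumptions: the file above is saved as simple_inference.py
# on the Python path, and a test image exists at sample.jpg).
# Roughly equivalent CLI call: python simple_inference.py --image_path sample.jpg
import re

from simple_inference import predict_location

response = predict_location(
    image_path="sample.jpg",          # hypothetical test image
    model_name="Qwen/Qwen2.5-VL-7B-Instruct",
    inference_engine="transformers",
)

# The prompt asks the model to wrap its final guess in <answer> ... </answer> tags,
# so a simple regex recovers the "country,administrative_area_level_1" string.
match = re.search(r"<answer>(.*?)</answer>", response, re.DOTALL)
if match:
    print("Predicted location:", match.group(1).strip().strip("$"))
else:
    print("No <answer> tag found; full response:", response)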