ysn-rfd committed on
Commit 1be8a56 · verified · 1 Parent(s): 707b195

Upload 31 files

opencv_test/Untitled.ipynb ADDED
@@ -0,0 +1,261 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "9991fa5b-c41f-4117-a81f-75727fa4a74d",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "data": {
11
+ "application/vnd.jupyter.widget-view+json": {
12
+ "model_id": "a77abb8840a9407ebf60f186551f64d4",
13
+ "version_major": 2,
14
+ "version_minor": 0
15
+ },
16
+ "text/plain": [
17
+ "pytorch_model.bin: 0%| | 0.00/485M [00:00<?, ?B/s]"
18
+ ]
19
+ },
20
+ "metadata": {},
21
+ "output_type": "display_data"
22
+ },
23
+ {
24
+ "name": "stdout",
25
+ "output_type": "stream",
26
+ "text": [
27
+ "* Running on local URL: http://127.0.0.1:7860\n",
28
+ "* To create a public link, set `share=True` in `launch()`.\n"
29
+ ]
30
+ },
31
+ {
32
+ "data": {
33
+ "text/html": [
34
+ "<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
35
+ ],
36
+ "text/plain": [
37
+ "<IPython.core.display.HTML object>"
38
+ ]
39
+ },
40
+ "metadata": {},
41
+ "output_type": "display_data"
42
+ },
43
+ {
44
+ "data": {
45
+ "text/plain": []
46
+ },
47
+ "execution_count": 1,
48
+ "metadata": {},
49
+ "output_type": "execute_result"
50
+ }
51
+ ],
52
+ "source": [
53
+ "import gradio as gr\n",
54
+ "from transformers import AutoTokenizer, AutoModelForCausalLM\n",
55
+ "import torch\n",
56
+ "\n",
57
+ "# بارگذاری مدل و توکنایزر\n",
58
+ "tokenizer = AutoTokenizer.from_pretrained(\"HooshvareLab/gpt2-fa\")\n",
59
+ "model = AutoModelForCausalLM.from_pretrained(\"HooshvareLab/gpt2-fa\")\n",
60
+ "\n",
61
+ "# تابع تولید متن\n",
62
+ "def generate_text(prompt):\n",
63
+ " input_ids = tokenizer.encode(prompt, return_tensors=\"pt\")\n",
64
+ " output = model.generate(\n",
65
+ " input_ids,\n",
66
+ " max_new_tokens=120,\n",
67
+ " do_sample=True,\n",
68
+ " temperature=0.7,\n",
69
+ " top_k=40,\n",
70
+ " top_p=0.9,\n",
71
+ " repetition_penalty=1.3,\n",
72
+ " pad_token_id=tokenizer.eos_token_id\n",
73
+ " )\n",
74
+ " return tokenizer.decode(output[0], skip_special_tokens=True)\n",
75
+ "\n",
76
+ "# رابط گرافیکی\n",
77
+ "gr.Interface(\n",
78
+ " fn=generate_text,\n",
79
+ " inputs=gr.Textbox(label=\"متن ورودی (پرامپت)\", placeholder=\"مثلاً: در یک روز بهاری،\"),\n",
80
+ " outputs=gr.Textbox(label=\"متن تولید شده\"),\n",
81
+ " title=\"تولید متن فارسی با GPT2\",\n",
82
+ " description=\"مدل: bolbolzaban/gpt2-persian - تولید خودکار متن طبیعی به زبان فارسی\"\n",
83
+ ").launch()\n"
84
+ ]
85
+ },
86
+ {
87
+ "cell_type": "code",
88
+ "execution_count": 1,
89
+ "id": "c4d50353-c26f-45a9-9962-da748dc618f4",
90
+ "metadata": {},
91
+ "outputs": [
92
+ {
93
+ "name": "stdout",
94
+ "output_type": "stream",
95
+ "text": [
96
+ "* Running on local URL: http://127.0.0.1:7860\n",
97
+ "* To create a public link, set `share=True` in `launch()`.\n"
98
+ ]
99
+ },
100
+ {
101
+ "data": {
102
+ "text/html": [
103
+ "<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
104
+ ],
105
+ "text/plain": [
106
+ "<IPython.core.display.HTML object>"
107
+ ]
108
+ },
109
+ "metadata": {},
110
+ "output_type": "display_data"
111
+ },
112
+ {
113
+ "data": {
114
+ "text/plain": []
115
+ },
116
+ "execution_count": 1,
117
+ "metadata": {},
118
+ "output_type": "execute_result"
119
+ }
120
+ ],
121
+ "source": [
122
+ "import gradio as gr\n",
123
+ "from transformers import AutoTokenizer, AutoModelForCausalLM\n",
124
+ "import torch\n",
125
+ "\n",
126
+ "# بارگذاری مدل و توکنایزر\n",
127
+ "tokenizer = AutoTokenizer.from_pretrained(\"HooshvareLab/gpt2-fa\")\n",
128
+ "model = AutoModelForCausalLM.from_pretrained(\"HooshvareLab/gpt2-fa\")\n",
129
+ "\n",
130
+ "# تابع تولید متن با تنظیمات دقیق\n",
131
+ "def generate_text(prompt):\n",
132
+ " input_ids = tokenizer.encode(prompt, return_tensors=\"pt\")\n",
133
+ " output = model.generate(\n",
134
+ " input_ids,\n",
135
+ " max_new_tokens=150,\n",
136
+ " do_sample=True,\n",
137
+ " temperature=0.7,\n",
138
+ " top_k=50,\n",
139
+ " top_p=0.92,\n",
140
+ " repetition_penalty=1.3,\n",
141
+ " no_repeat_ngram_size=3,\n",
142
+ " pad_token_id=tokenizer.eos_token_id,\n",
143
+ " eos_token_id=tokenizer.eos_token_id\n",
144
+ " )\n",
145
+ " return tokenizer.decode(output[0], skip_special_tokens=True)\n",
146
+ "\n",
147
+ "# ساخت رابط گرافیکی با Gradio\n",
148
+ "gr.Interface(\n",
149
+ " fn=generate_text,\n",
150
+ " inputs=gr.Textbox(label=\"📝 متن ورودی (پرامپت)\", placeholder=\"مثلاً: در یک روز بهاری،\", lines=3),\n",
151
+ " outputs=gr.Textbox(label=\"📄 متن تولید شده\"),\n",
152
+ " title=\"💬 تولید متن فارسی دقیق با GPT2\",\n",
153
+ " description=\"این ابزار از مدل bolbolzaban/gpt2-persian استفاده می‌کند و متن روان و طبیعی به زبان فارسی تولید می‌کند.\",\n",
154
+ " theme=\"soft\"\n",
155
+ ").launch()\n"
156
+ ]
157
+ },
158
+ {
159
+ "cell_type": "code",
160
+ "execution_count": 1,
161
+ "id": "eba7eda5-f61f-434c-b500-58a8b33c6f0e",
162
+ "metadata": {},
163
+ "outputs": [
164
+ {
165
+ "name": "stdout",
166
+ "output_type": "stream",
167
+ "text": [
168
+ "* Running on local URL: http://127.0.0.1:7860\n",
169
+ "* To create a public link, set `share=True` in `launch()`.\n"
170
+ ]
171
+ },
172
+ {
173
+ "data": {
174
+ "text/html": [
175
+ "<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
176
+ ],
177
+ "text/plain": [
178
+ "<IPython.core.display.HTML object>"
179
+ ]
180
+ },
181
+ "metadata": {},
182
+ "output_type": "display_data"
183
+ },
184
+ {
185
+ "data": {
186
+ "text/plain": []
187
+ },
188
+ "execution_count": 1,
189
+ "metadata": {},
190
+ "output_type": "execute_result"
191
+ }
192
+ ],
193
+ "source": [
194
+ "import gradio as gr\n",
195
+ "from transformers import AutoTokenizer, AutoModelForCausalLM\n",
196
+ "import torch\n",
197
+ "\n",
198
+ "# بارگذاری مدل و توکنایزر\n",
199
+ "tokenizer = AutoTokenizer.from_pretrained(\"HooshvareLab/gpt2-fa\")\n",
200
+ "model = AutoModelForCausalLM.from_pretrained(\"HooshvareLab/gpt2-fa\")\n",
201
+ "\n",
202
+ "# تابع تولید متن با دستور\n",
203
+ "def generate_with_instruction(instruction):\n",
204
+ " prompt = f\"دستور: {instruction}\\nپاسخ:\"\n",
205
+ " input_ids = tokenizer.encode(prompt, return_tensors=\"pt\")\n",
206
+ " output = model.generate(\n",
207
+ " input_ids,\n",
208
+ " max_new_tokens=150,\n",
209
+ " do_sample=True,\n",
210
+ " temperature=0.7,\n",
211
+ " top_k=50,\n",
212
+ " top_p=0.92,\n",
213
+ " repetition_penalty=1.3,\n",
214
+ " no_repeat_ngram_size=3,\n",
215
+ " pad_token_id=tokenizer.eos_token_id,\n",
216
+ " eos_token_id=tokenizer.eos_token_id\n",
217
+ " )\n",
218
+ " return tokenizer.decode(output[0], skip_special_tokens=True)\n",
219
+ "\n",
220
+ "# رابط گرافیکی\n",
221
+ "gr.Interface(\n",
222
+ " fn=generate_with_instruction,\n",
223
+ " inputs=gr.Textbox(label=\"📝 دستور وارد کنید\", placeholder=\"مثلاً: درباره‌ی تاثیر ورزش بر ذهن بنویس\", lines=3),\n",
224
+ " outputs=gr.Textbox(label=\"📄 پاسخ مدل\"),\n",
225
+ " title=\"💬 دستور به مدل GPT2 فارسی\",\n",
226
+ " description=\"با وارد کردن دستور در قالب فارسی، مدل شروع به تولید متن می‌کند. مثل: نوشتن، خلاصه‌سازی یا ترجمه.\",\n",
227
+ " theme=\"soft\"\n",
228
+ ").launch()\n"
229
+ ]
230
+ },
231
+ {
232
+ "cell_type": "code",
233
+ "execution_count": null,
234
+ "id": "b83582e3-9740-46a5-8a1b-6329acb9ef16",
235
+ "metadata": {},
236
+ "outputs": [],
237
+ "source": []
238
+ }
239
+ ],
240
+ "metadata": {
241
+ "kernelspec": {
242
+ "display_name": "Python [conda env:base] *",
243
+ "language": "python",
244
+ "name": "conda-base-py"
245
+ },
246
+ "language_info": {
247
+ "codemirror_mode": {
248
+ "name": "ipython",
249
+ "version": 3
250
+ },
251
+ "file_extension": ".py",
252
+ "mimetype": "text/x-python",
253
+ "name": "python",
254
+ "nbconvert_exporter": "python",
255
+ "pygments_lexer": "ipython3",
256
+ "version": "3.12.7"
257
+ }
258
+ },
259
+ "nbformat": 4,
260
+ "nbformat_minor": 5
261
+ }
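Reviewer note on the cells above: GPT-2 checkpoints such as gpt2-fa ship no padding token, which is presumably why `pad_token_id=tokenizer.eos_token_id` is passed to `generate()`. A minimal sketch of the same sampling step outside Gradio, with an explicit attention mask so `generate()` does not have to infer it; the prompt string is only an example, not part of the upload.

# Reviewer sketch: the notebook's generation step without the UI.
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("HooshvareLab/gpt2-fa")
model = AutoModelForCausalLM.from_pretrained("HooshvareLab/gpt2-fa")

enc = tokenizer("در یک روز بهاری،", return_tensors="pt")  # returns input_ids and attention_mask
output = model.generate(
    enc["input_ids"],
    attention_mask=enc["attention_mask"],
    max_new_tokens=120,
    do_sample=True,
    temperature=0.7,
    top_k=40,
    top_p=0.9,
    repetition_penalty=1.3,
    pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token, so EOS is reused
)
print(tokenizer.decode(output[0], skip_special_tokens=True))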
opencv_test/Untitled1.ipynb ADDED
@@ -0,0 +1,174 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "6bfa65e2-6aa7-45d3-a52d-8d6339f75501",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "name": "stdout",
11
+ "output_type": "stream",
12
+ "text": [
13
+ "* Running on local URL: http://127.0.0.1:7860\n",
14
+ "* To create a public link, set `share=True` in `launch()`.\n"
15
+ ]
16
+ },
17
+ {
18
+ "data": {
19
+ "text/html": [
20
+ "<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
21
+ ],
22
+ "text/plain": [
23
+ "<IPython.core.display.HTML object>"
24
+ ]
25
+ },
26
+ "metadata": {},
27
+ "output_type": "display_data"
28
+ },
29
+ {
30
+ "data": {
31
+ "text/plain": []
32
+ },
33
+ "execution_count": 1,
34
+ "metadata": {},
35
+ "output_type": "execute_result"
36
+ }
37
+ ],
38
+ "source": [
39
+ "import gradio as gr\n",
40
+ "from transformers import AutoTokenizer, AutoModelForCausalLM\n",
41
+ "import torch\n",
42
+ "\n",
43
+ "# بارگذاری مدل و توکنایزر\n",
44
+ "tokenizer = AutoTokenizer.from_pretrained(\"HooshvareLab/gpt2-fa\")\n",
45
+ "model = AutoModelForCausalLM.from_pretrained(\"HooshvareLab/gpt2-fa\")\n",
46
+ "\n",
47
+ "# تابع تولید متن با دستور\n",
48
+ "def generate_with_instruction(instruction):\n",
49
+ " prompt = f\"دستور: {instruction}\\nپاسخ:\"\n",
50
+ " input_ids = tokenizer.encode(prompt, return_tensors=\"pt\")\n",
51
+ " output = model.generate(\n",
52
+ " input_ids,\n",
53
+ " max_new_tokens=900,\n",
54
+ " do_sample=True,\n",
55
+ " temperature=0.7,\n",
56
+ " top_k=50,\n",
57
+ " top_p=0.92,\n",
58
+ " repetition_penalty=1.3,\n",
59
+ " no_repeat_ngram_size=3,\n",
60
+ " pad_token_id=tokenizer.eos_token_id,\n",
61
+ " eos_token_id=tokenizer.eos_token_id\n",
62
+ " )\n",
63
+ " return tokenizer.decode(output[0], skip_special_tokens=True)\n",
64
+ "\n",
65
+ "# رابط گرافیکی\n",
66
+ "gr.Interface(\n",
67
+ " fn=generate_with_instruction,\n",
68
+ " inputs=gr.Textbox(label=\"📝 دستور وارد کنید\", placeholder=\"مثلاً: درباره‌ی تاثیر ورزش بر ذهن بنویس\", lines=3),\n",
69
+ " outputs=gr.Textbox(label=\"📄 پاسخ مدل\"),\n",
70
+ " title=\"💬 دستور به مدل GPT2 فارسی\",\n",
71
+ " description=\"با وارد کردن دستور در قالب فارسی، مدل شروع به تولید متن می‌کند. مثل: نوشتن، خلاصه‌سازی یا ترجمه.\",\n",
72
+ " theme=\"soft\"\n",
73
+ ").launch()\n"
74
+ ]
75
+ },
76
+ {
77
+ "cell_type": "code",
78
+ "execution_count": 1,
79
+ "id": "b693482a-21c0-483f-bb46-deb050ce82ee",
80
+ "metadata": {},
81
+ "outputs": [
82
+ {
83
+ "name": "stdout",
84
+ "output_type": "stream",
85
+ "text": [
86
+ "* Running on local URL: http://127.0.0.1:7860\n",
87
+ "* To create a public link, set `share=True` in `launch()`.\n"
88
+ ]
89
+ },
90
+ {
91
+ "data": {
92
+ "text/html": [
93
+ "<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
94
+ ],
95
+ "text/plain": [
96
+ "<IPython.core.display.HTML object>"
97
+ ]
98
+ },
99
+ "metadata": {},
100
+ "output_type": "display_data"
101
+ },
102
+ {
103
+ "data": {
104
+ "text/plain": []
105
+ },
106
+ "execution_count": 1,
107
+ "metadata": {},
108
+ "output_type": "execute_result"
109
+ }
110
+ ],
111
+ "source": [
112
+ "import gradio as gr\n",
113
+ "from transformers import GPT2LMHeadModel, GPT2Tokenizer\n",
114
+ "import torch\n",
115
+ "\n",
116
+ "# بارگذاری مدل و توکنایزر\n",
117
+ "model_name = \"HooshvareLab/gpt2-fa\"\n",
118
+ "model = GPT2LMHeadModel.from_pretrained(model_name)\n",
119
+ "tokenizer = GPT2Tokenizer.from_pretrained(model_name)\n",
120
+ "\n",
121
+ "# تنظیم مدل برای تولید متن\n",
122
+ "model.eval()\n",
123
+ "\n",
124
+ "# تابعی برای تولید متن\n",
125
+ "def generate_text(prompt):\n",
126
+ " input_ids = tokenizer.encode(prompt, return_tensors='pt')\n",
127
+ " \n",
128
+ " # تولید متن\n",
129
+ " with torch.no_grad():\n",
130
+ " output = model.generate(input_ids, max_length=100, num_return_sequences=1, no_repeat_ngram_size=2, temperature=0.7)\n",
131
+ "\n",
132
+ " # تبدیل توکن‌های خروجی به متن\n",
133
+ " generated_text = tokenizer.decode(output[0], skip_special_tokens=True)\n",
134
+ " \n",
135
+ " return generated_text\n",
136
+ "\n",
137
+ "# تعریف رابط کاربری با Gradio\n",
138
+ "iface = gr.Interface(fn=generate_text, inputs=\"text\", outputs=\"text\", live=True, title=\"تولید متن فارسی با مدل GPT-2\")\n",
139
+ "\n",
140
+ "# اجرا کردن رابط\n",
141
+ "iface.launch()\n"
142
+ ]
143
+ },
144
+ {
145
+ "cell_type": "code",
146
+ "execution_count": null,
147
+ "id": "fc890196-c4bf-4b01-ab3c-0796836f9355",
148
+ "metadata": {},
149
+ "outputs": [],
150
+ "source": []
151
+ }
152
+ ],
153
+ "metadata": {
154
+ "kernelspec": {
155
+ "display_name": "Python [conda env:base] *",
156
+ "language": "python",
157
+ "name": "conda-base-py"
158
+ },
159
+ "language_info": {
160
+ "codemirror_mode": {
161
+ "name": "ipython",
162
+ "version": 3
163
+ },
164
+ "file_extension": ".py",
165
+ "mimetype": "text/x-python",
166
+ "name": "python",
167
+ "nbconvert_exporter": "python",
168
+ "pygments_lexer": "ipython3",
169
+ "version": "3.12.7"
170
+ }
171
+ },
172
+ "nbformat": 4,
173
+ "nbformat_minor": 5
174
+ }
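Reviewer note: the second cell above passes `temperature=0.7` without `do_sample=True`, so `generate()` decodes greedily and the temperature has no effect (transformers also warns about this). A short sketch of that call with sampling actually enabled; the prompt is illustrative.

# Reviewer sketch: sampling must be switched on for temperature to matter.
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

model_name = "HooshvareLab/gpt2-fa"
model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model.eval()

input_ids = tokenizer.encode("در یک روز بهاری،", return_tensors="pt")
with torch.no_grad():
    output = model.generate(
        input_ids,
        max_length=100,
        do_sample=True,            # without this, temperature/top_k settings are ignored
        temperature=0.7,
        no_repeat_ngram_size=2,
        pad_token_id=tokenizer.eos_token_id,
    )
print(tokenizer.decode(output[0], skip_special_tokens=True))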
opencv_test/Untitled2.ipynb ADDED
@@ -0,0 +1,78 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "id": "f8a34a5c-7d7b-4c3f-ac3b-72fb8361d148",
7
+ "metadata": {
8
+ "scrolled": true
9
+ },
10
+ "outputs": [],
11
+ "source": [
12
+ "import gradio as gr\n",
13
+ "from transformers import AutoTokenizer, AutoModelForCausalLM\n",
14
+ "import torch\n",
15
+ "\n",
16
+ "# بارگذاری مدل و توکنایزر\n",
17
+ "tokenizer = AutoTokenizer.from_pretrained(\"HooshvareLab/gpt2-fa\")\n",
18
+ "model = AutoModelForCausalLM.from_pretrained(\"HooshvareLab/gpt2-fa\")\n",
19
+ "\n",
20
+ "# تابع تولید متن با تنظیمات دقیق\n",
21
+ "def generate_text(prompt):\n",
22
+ " input_ids = tokenizer.encode(prompt, return_tensors=\"pt\")\n",
23
+ " output = model.generate(\n",
24
+ " input_ids,\n",
25
+ " max_new_tokens=256,\n",
26
+ " do_sample=True,\n",
27
+ " temperature=0.7,\n",
28
+ " top_k=50,\n",
29
+ " top_p=0.92,\n",
30
+ " repetition_penalty=1.3,\n",
31
+ " no_repeat_ngram_size=3,\n",
32
+ " pad_token_id=tokenizer.eos_token_id,\n",
33
+ " eos_token_id=tokenizer.eos_token_id\n",
34
+ " )\n",
35
+ " return tokenizer.decode(output[0], skip_special_tokens=True)\n",
36
+ "\n",
37
+ "# ساخت رابط گرافیکی با Gradio\n",
38
+ "gr.Interface(\n",
39
+ " fn=generate_text,\n",
40
+ " inputs=gr.Textbox(label=\"📝 متن ورودی (پرامپت)\", placeholder=\"مثلاً: در یک روز بهاری،\", lines=3),\n",
41
+ " outputs=gr.Textbox(label=\"📄 متن تولید شده\"),\n",
42
+ " title=\"💬 تولید متن فارسی دقیق با GPT2\",\n",
43
+ " description=\"این ابزار از مدل bolbolzaban/gpt2-persian استفاده می‌کند و متن روان و طبیعی به زبان فارسی تولید می‌کند.\",\n",
44
+ " theme=\"soft\"\n",
45
+ ").launch()\n"
46
+ ]
47
+ },
48
+ {
49
+ "cell_type": "code",
50
+ "execution_count": null,
51
+ "id": "33b7f00a-888b-4e51-9453-3c9b7f2e82c0",
52
+ "metadata": {},
53
+ "outputs": [],
54
+ "source": []
55
+ }
56
+ ],
57
+ "metadata": {
58
+ "kernelspec": {
59
+ "display_name": "Python [conda env:base] *",
60
+ "language": "python",
61
+ "name": "conda-base-py"
62
+ },
63
+ "language_info": {
64
+ "codemirror_mode": {
65
+ "name": "ipython",
66
+ "version": 3
67
+ },
68
+ "file_extension": ".py",
69
+ "mimetype": "text/x-python",
70
+ "name": "python",
71
+ "nbconvert_exporter": "python",
72
+ "pygments_lexer": "ipython3",
73
+ "version": "3.12.7"
74
+ }
75
+ },
76
+ "nbformat": 4,
77
+ "nbformat_minor": 5
78
+ }
opencv_test/Untitled3.ipynb ADDED
@@ -0,0 +1,141 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "ebc05db3-a28f-4f6c-8ebc-649d9b3012ca",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "data": {
11
+ "application/vnd.jupyter.widget-view+json": {
12
+ "model_id": "1b61a5c74bbf4d45b2b1c469682586e5",
13
+ "version_major": 2,
14
+ "version_minor": 0
15
+ },
16
+ "text/plain": [
17
+ "tokenizer_config.json: 0%| | 0.00/50.8k [00:00<?, ?B/s]"
18
+ ]
19
+ },
20
+ "metadata": {},
21
+ "output_type": "display_data"
22
+ },
23
+ {
24
+ "data": {
25
+ "application/vnd.jupyter.widget-view+json": {
26
+ "model_id": "dd9d2a48c24546d59e49ea586ffc47e9",
27
+ "version_major": 2,
28
+ "version_minor": 0
29
+ },
30
+ "text/plain": [
31
+ "tokenizer.json: 0%| | 0.00/9.09M [00:00<?, ?B/s]"
32
+ ]
33
+ },
34
+ "metadata": {},
35
+ "output_type": "display_data"
36
+ },
37
+ {
38
+ "data": {
39
+ "application/vnd.jupyter.widget-view+json": {
40
+ "model_id": "70e69a427f724affb5eab8ccf71a9f6c",
41
+ "version_major": 2,
42
+ "version_minor": 0
43
+ },
44
+ "text/plain": [
45
+ "special_tokens_map.json: 0%| | 0.00/73.0 [00:00<?, ?B/s]"
46
+ ]
47
+ },
48
+ "metadata": {},
49
+ "output_type": "display_data"
50
+ },
51
+ {
52
+ "data": {
53
+ "application/vnd.jupyter.widget-view+json": {
54
+ "model_id": "c34b9f14434942229268d03748061964",
55
+ "version_major": 2,
56
+ "version_minor": 0
57
+ },
58
+ "text/plain": [
59
+ "config.json: 0%| | 0.00/844 [00:00<?, ?B/s]"
60
+ ]
61
+ },
62
+ "metadata": {},
63
+ "output_type": "display_data"
64
+ },
65
+ {
66
+ "ename": "ValueError",
67
+ "evalue": "The repository for microsoft/bitnet-b1.58-2B-4T contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/microsoft/bitnet-b1.58-2B-4T.\nPlease pass the argument `trust_remote_code=True` to allow custom code to be run.",
68
+ "output_type": "error",
69
+ "traceback": [
70
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
71
+ "\u001b[1;31mAttributeError\u001b[0m Traceback (most recent call last)",
72
+ "File \u001b[1;32m~\\anaconda3\\Lib\\site-packages\\transformers\\dynamic_module_utils.py:666\u001b[0m, in \u001b[0;36mresolve_trust_remote_code\u001b[1;34m(trust_remote_code, model_name, has_local_code, has_remote_code)\u001b[0m\n\u001b[0;32m 665\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m--> 666\u001b[0m prev_sig_handler \u001b[38;5;241m=\u001b[39m signal\u001b[38;5;241m.\u001b[39msignal(signal\u001b[38;5;241m.\u001b[39mSIGALRM, _raise_timeout_error)\n\u001b[0;32m 667\u001b[0m signal\u001b[38;5;241m.\u001b[39malarm(TIME_OUT_REMOTE_CODE)\n",
73
+ "\u001b[1;31mAttributeError\u001b[0m: module 'signal' has no attribute 'SIGALRM'",
74
+ "\nDuring handling of the above exception, another exception occurred:\n",
75
+ "\u001b[1;31mValueError\u001b[0m Traceback (most recent call last)",
76
+ "Cell \u001b[1;32mIn[1], line 8\u001b[0m\n\u001b[0;32m 6\u001b[0m \u001b[38;5;66;03m# Load tokenizer and model\u001b[39;00m\n\u001b[0;32m 7\u001b[0m tokenizer \u001b[38;5;241m=\u001b[39m AutoTokenizer\u001b[38;5;241m.\u001b[39mfrom_pretrained(model_id)\n\u001b[1;32m----> 8\u001b[0m model \u001b[38;5;241m=\u001b[39m AutoModelForCausalLM\u001b[38;5;241m.\u001b[39mfrom_pretrained(\n\u001b[0;32m 9\u001b[0m model_id,\n\u001b[0;32m 10\u001b[0m torch_dtype\u001b[38;5;241m=\u001b[39mtorch\u001b[38;5;241m.\u001b[39mbfloat16\n\u001b[0;32m 11\u001b[0m )\n\u001b[0;32m 13\u001b[0m \u001b[38;5;66;03m# Apply the chat template\u001b[39;00m\n\u001b[0;32m 14\u001b[0m messages \u001b[38;5;241m=\u001b[39m [\n\u001b[0;32m 15\u001b[0m {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrole\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msystem\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcontent\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mYou are a helpful AI assistant.\u001b[39m\u001b[38;5;124m\"\u001b[39m},\n\u001b[0;32m 16\u001b[0m {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrole\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124muser\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcontent\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mHow are you?\u001b[39m\u001b[38;5;124m\"\u001b[39m},\n\u001b[0;32m 17\u001b[0m ]\n",
77
+ "File \u001b[1;32m~\\anaconda3\\Lib\\site-packages\\transformers\\models\\auto\\auto_factory.py:531\u001b[0m, in \u001b[0;36m_BaseAutoModelClass.from_pretrained\u001b[1;34m(cls, pretrained_model_name_or_path, *model_args, **kwargs)\u001b[0m\n\u001b[0;32m 528\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m kwargs\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mquantization_config\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m 529\u001b[0m _ \u001b[38;5;241m=\u001b[39m kwargs\u001b[38;5;241m.\u001b[39mpop(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mquantization_config\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m--> 531\u001b[0m config, kwargs \u001b[38;5;241m=\u001b[39m AutoConfig\u001b[38;5;241m.\u001b[39mfrom_pretrained(\n\u001b[0;32m 532\u001b[0m pretrained_model_name_or_path,\n\u001b[0;32m 533\u001b[0m return_unused_kwargs\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m,\n\u001b[0;32m 534\u001b[0m trust_remote_code\u001b[38;5;241m=\u001b[39mtrust_remote_code,\n\u001b[0;32m 535\u001b[0m code_revision\u001b[38;5;241m=\u001b[39mcode_revision,\n\u001b[0;32m 536\u001b[0m _commit_hash\u001b[38;5;241m=\u001b[39mcommit_hash,\n\u001b[0;32m 537\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mhub_kwargs,\n\u001b[0;32m 538\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs,\n\u001b[0;32m 539\u001b[0m )\n\u001b[0;32m 541\u001b[0m \u001b[38;5;66;03m# if torch_dtype=auto was passed here, ensure to pass it on\u001b[39;00m\n\u001b[0;32m 542\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m kwargs_orig\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtorch_dtype\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m) \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mauto\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n",
78
+ "File \u001b[1;32m~\\anaconda3\\Lib\\site-packages\\transformers\\models\\auto\\configuration_auto.py:1117\u001b[0m, in \u001b[0;36mAutoConfig.from_pretrained\u001b[1;34m(cls, pretrained_model_name_or_path, **kwargs)\u001b[0m\n\u001b[0;32m 1115\u001b[0m has_remote_code \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mauto_map\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01min\u001b[39;00m config_dict \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mAutoConfig\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01min\u001b[39;00m config_dict[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mauto_map\u001b[39m\u001b[38;5;124m\"\u001b[39m]\n\u001b[0;32m 1116\u001b[0m has_local_code \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmodel_type\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01min\u001b[39;00m config_dict \u001b[38;5;129;01mand\u001b[39;00m config_dict[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmodel_type\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;129;01min\u001b[39;00m CONFIG_MAPPING\n\u001b[1;32m-> 1117\u001b[0m trust_remote_code \u001b[38;5;241m=\u001b[39m resolve_trust_remote_code(\n\u001b[0;32m 1118\u001b[0m trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code\n\u001b[0;32m 1119\u001b[0m )\n\u001b[0;32m 1121\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m has_remote_code \u001b[38;5;129;01mand\u001b[39;00m trust_remote_code:\n\u001b[0;32m 1122\u001b[0m class_ref \u001b[38;5;241m=\u001b[39m config_dict[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mauto_map\u001b[39m\u001b[38;5;124m\"\u001b[39m][\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mAutoConfig\u001b[39m\u001b[38;5;124m\"\u001b[39m]\n",
79
+ "File \u001b[1;32m~\\anaconda3\\Lib\\site-packages\\transformers\\dynamic_module_utils.py:682\u001b[0m, in \u001b[0;36mresolve_trust_remote_code\u001b[1;34m(trust_remote_code, model_name, has_local_code, has_remote_code)\u001b[0m\n\u001b[0;32m 679\u001b[0m signal\u001b[38;5;241m.\u001b[39malarm(\u001b[38;5;241m0\u001b[39m)\n\u001b[0;32m 680\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m:\n\u001b[0;32m 681\u001b[0m \u001b[38;5;66;03m# OS which does not support signal.SIGALRM\u001b[39;00m\n\u001b[1;32m--> 682\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[0;32m 683\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mThe repository for \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mmodel_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m contains custom code which must be executed to correctly \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 684\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mload the model. You can inspect the repository content at https://hf.co/\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mmodel_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 685\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mPlease pass the argument `trust_remote_code=True` to allow custom code to be run.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 686\u001b[0m )\n\u001b[0;32m 687\u001b[0m \u001b[38;5;28;01mfinally\u001b[39;00m:\n\u001b[0;32m 688\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m prev_sig_handler \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
80
+ "\u001b[1;31mValueError\u001b[0m: The repository for microsoft/bitnet-b1.58-2B-4T contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/microsoft/bitnet-b1.58-2B-4T.\nPlease pass the argument `trust_remote_code=True` to allow custom code to be run."
81
+ ]
82
+ }
83
+ ],
84
+ "source": [
85
+ "import torch\n",
86
+ "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
87
+ "\n",
88
+ "model_id = \"microsoft/bitnet-b1.58-2B-4T\"\n",
89
+ "\n",
90
+ "# Load tokenizer and model\n",
91
+ "tokenizer = AutoTokenizer.from_pretrained(model_id)\n",
92
+ "model = AutoModelForCausalLM.from_pretrained(\n",
93
+ " model_id,\n",
94
+ " torch_dtype=torch.bfloat16\n",
95
+ ")\n",
96
+ "\n",
97
+ "# Apply the chat template\n",
98
+ "messages = [\n",
99
+ " {\"role\": \"system\", \"content\": \"You are a helpful AI assistant.\"},\n",
100
+ " {\"role\": \"user\", \"content\": \"How are you?\"},\n",
101
+ "]\n",
102
+ "prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
103
+ "chat_input = tokenizer(prompt, return_tensors=\"pt\").to(model.device)\n",
104
+ "\n",
105
+ "# Generate response\n",
106
+ "chat_outputs = model.generate(**chat_input, max_new_tokens=50)\n",
107
+ "response = tokenizer.decode(chat_outputs[0][chat_input['input_ids'].shape[-1]:], skip_special_tokens=True) # Decode only the response part\n",
108
+ "print(\"\\nAssistant Response:\", response)\n"
109
+ ]
110
+ },
111
+ {
112
+ "cell_type": "code",
113
+ "execution_count": null,
114
+ "id": "2b6c0add-3a67-47df-be9d-ed78bbd08a16",
115
+ "metadata": {},
116
+ "outputs": [],
117
+ "source": []
118
+ }
119
+ ],
120
+ "metadata": {
121
+ "kernelspec": {
122
+ "display_name": "Python [conda env:base] *",
123
+ "language": "python",
124
+ "name": "conda-base-py"
125
+ },
126
+ "language_info": {
127
+ "codemirror_mode": {
128
+ "name": "ipython",
129
+ "version": 3
130
+ },
131
+ "file_extension": ".py",
132
+ "mimetype": "text/x-python",
133
+ "name": "python",
134
+ "nbconvert_exporter": "python",
135
+ "pygments_lexer": "ipython3",
136
+ "version": "3.12.7"
137
+ }
138
+ },
139
+ "nbformat": 4,
140
+ "nbformat_minor": 5
141
+ }
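Reviewer note: the traceback above is the stock transformers message — loading microsoft/bitnet-b1.58-2B-4T requires executing custom code from the repository, and on Windows the SIGALRM-based confirmation prompt is unavailable, so the load fails outright. Assuming the repository is trusted, the fix the error itself suggests is to pass `trust_remote_code=True`:

# Reviewer sketch: the load call from the cell above with the flag the error asks for.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "microsoft/bitnet-b1.58-2B-4T"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,  # allow the repo's custom modeling code to run
)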
opencv_test/Untitled4.ipynb ADDED
@@ -0,0 +1,194 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 37,
6
+ "id": "34a23b48-2ef5-42d3-859c-bf1ce6bf414c",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "name": "stdout",
11
+ "output_type": "stream",
12
+ "text": [
13
+ "تا ۵ ثانیه وقت داری پنجره تلگرام رو فعال کنی...\n",
14
+ "ارسال متوقف شد.\n"
15
+ ]
16
+ }
17
+ ],
18
+ "source": [
19
+ "import pyautogui\n",
20
+ "import time\n",
21
+ "\n",
22
+ "# چند ثانیه صبر کن تا بتونی پنجره تلگرام رو فعال کنی\n",
23
+ "print(\"تا ۵ ثانیه وقت داری پنجره تلگرام رو فعال کنی...\")\n",
24
+ "\n",
25
+ "time.sleep(10)\n",
26
+ "\n",
27
+ "try:\n",
28
+ " while True:\n",
29
+ " pyautogui.write(\".\")\n",
30
+ " pyautogui.press('enter') # هر ۵۰ میلی‌ثانیه یک پیام می‌فرسته\n",
31
+ "except KeyboardInterrupt:\n",
32
+ " print(\"ارسال متوقف شد.\")\n",
33
+ "\n",
34
+ "\n",
35
+ "\n",
36
+ "\n"
37
+ ]
38
+ },
39
+ {
40
+ "cell_type": "code",
41
+ "execution_count": 9,
42
+ "id": "46f24ef6-81b2-493b-96eb-e3b72aec8df8",
43
+ "metadata": {},
44
+ "outputs": [
45
+ {
46
+ "ename": "SyntaxError",
47
+ "evalue": "invalid syntax (46814942.py, line 1)",
48
+ "output_type": "error",
49
+ "traceback": [
50
+ "\u001b[1;36m Cell \u001b[1;32mIn[9], line 1\u001b[1;36m\u001b[0m\n\u001b[1;33m .\u001b[0m\n\u001b[1;37m ^\u001b[0m\n\u001b[1;31mSyntaxError\u001b[0m\u001b[1;31m:\u001b[0m invalid syntax\n"
51
+ ]
52
+ }
53
+ ],
54
+ "source": [
55
+ ".\n",
56
+ ".\n",
57
+ ".\n",
58
+ ".\n",
59
+ "..8923333\n",
60
+ "8923333\n",
61
+ "8923333\n",
62
+ ".\n",
63
+ ".\n",
64
+ ".\n",
65
+ ".\n",
66
+ ".\n",
67
+ "\n",
68
+ "8923333\n",
69
+ "8923333\n",
70
+ "333\n",
71
+ "8923333\n",
72
+ "8923333\n",
73
+ "8923333\n",
74
+ "\n",
75
+ "..\n",
76
+ "\n",
77
+ "*\n",
78
+ "*\n",
79
+ " *\n",
80
+ "\n",
81
+ "\n",
82
+ "\n",
83
+ "\n",
84
+ "\n",
85
+ "\n",
86
+ "\n",
87
+ "\n",
88
+ "\n",
89
+ "\n",
90
+ "\n",
91
+ "\n",
92
+ "\n",
93
+ "\n",
94
+ " \n",
95
+ " *\n",
96
+ "*\n",
97
+ " *\n",
98
+ " *\n",
99
+ "*\n",
100
+ " *\n",
101
+ " *\n",
102
+ "*\n",
103
+ " *\n",
104
+ " *\n",
105
+ "*\n",
106
+ "\n",
107
+ "\n",
108
+ "\n",
109
+ "\n",
110
+ "\n",
111
+ "\n",
112
+ ".\n",
113
+ "\n",
114
+ "\n",
115
+ "\n",
116
+ "\n",
117
+ "\n",
118
+ "\n",
119
+ "\n",
120
+ ".\n",
121
+ ".\n",
122
+ ".\n",
123
+ ".*99123\n",
124
+ "*99123\n",
125
+ "*99123\n",
126
+ "*99123\n",
127
+ "*99123\n",
128
+ "*99123\n",
129
+ "\n",
130
+ ".\n",
131
+ ".\n",
132
+ ".\n",
133
+ ".\n",
134
+ ".\n",
135
+ ".\n",
136
+ ".\n",
137
+ ".\n",
138
+ "\n",
139
+ ".\n",
140
+ ".\n",
141
+ ".\n",
142
+ "..\n",
143
+ ".\n",
144
+ ".\n",
145
+ ".\n",
146
+ ".\n",
147
+ "..\n",
148
+ "..\n",
149
+ ".\n",
150
+ ".\n",
151
+ "..\n",
152
+ ".\n",
153
+ ".\n",
154
+ ".\n",
155
+ ".\n",
156
+ "..\n",
157
+ ".\n",
158
+ ".\n",
159
+ ".\n",
160
+ ".\n",
161
+ "."
162
+ ]
163
+ },
164
+ {
165
+ "cell_type": "code",
166
+ "execution_count": null,
167
+ "id": "939146aa-0063-4bba-90ec-ea9d637dda07",
168
+ "metadata": {},
169
+ "outputs": [],
170
+ "source": []
171
+ }
172
+ ],
173
+ "metadata": {
174
+ "kernelspec": {
175
+ "display_name": "Python [conda env:base] *",
176
+ "language": "python",
177
+ "name": "conda-base-py"
178
+ },
179
+ "language_info": {
180
+ "codemirror_mode": {
181
+ "name": "ipython",
182
+ "version": 3
183
+ },
184
+ "file_extension": ".py",
185
+ "mimetype": "text/x-python",
186
+ "name": "python",
187
+ "nbconvert_exporter": "python",
188
+ "pygments_lexer": "ipython3",
189
+ "version": "3.12.7"
190
+ }
191
+ },
192
+ "nbformat": 4,
193
+ "nbformat_minor": 5
194
+ }
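Reviewer note: in the first cell above, the inline comment mentions a message roughly every 50 ms, but the loop has no delay at all, so pyautogui types as fast as it can. A sketch of the same loop with an explicit pause; the 5-second warm-up and the English prompts are illustrative (the cell itself sleeps for 10 seconds and prints Persian text).

# Reviewer sketch: same key-sending loop, rate-limited with an explicit sleep.
import time
import pyautogui

print("Switch focus to the target window...")
time.sleep(5)

try:
    while True:
        pyautogui.write(".")
        pyautogui.press("enter")
        time.sleep(0.05)  # ~50 ms between messages, as the original comment intended
except KeyboardInterrupt:
    print("Sending stopped.")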
opencv_test/Untitled5.ipynb ADDED
@@ -0,0 +1,126 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "1dabd320-903a-4f6c-9634-e8bd5993f90f",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "name": "stdout",
11
+ "output_type": "stream",
12
+ "text": [
13
+ "* Running on local URL: http://127.0.0.1:7860\n",
14
+ "* To create a public link, set `share=True` in `launch()`.\n"
15
+ ]
16
+ },
17
+ {
18
+ "data": {
19
+ "text/html": [
20
+ "<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
21
+ ],
22
+ "text/plain": [
23
+ "<IPython.core.display.HTML object>"
24
+ ]
25
+ },
26
+ "metadata": {},
27
+ "output_type": "display_data"
28
+ },
29
+ {
30
+ "data": {
31
+ "text/plain": []
32
+ },
33
+ "execution_count": 1,
34
+ "metadata": {},
35
+ "output_type": "execute_result"
36
+ }
37
+ ],
38
+ "source": [
39
+ "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
40
+ "import torch\n",
41
+ "import gradio as gr\n",
42
+ "\n",
43
+ "# مدل و توکنایزر\n",
44
+ "model_name = \"HuggingFaceTB/SmolLM2-360M-Instruct\"\n",
45
+ "tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
46
+ "model = AutoModelForCausalLM.from_pretrained(model_name)\n",
47
+ "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
48
+ "model.to(device)\n",
49
+ "model.eval()\n",
50
+ "\n",
51
+ "# پیام‌های اولیه مکالمه\n",
52
+ "def format_messages(user_prompt):\n",
53
+ " system_message = \"<|im_start|>system\\n<|im_end|>\\n\"\n",
54
+ " user_message = f\"<|im_start|>user\\n{user_prompt}<|im_end|>\\n\"\n",
55
+ " assistant_prefix = \"<|im_start|>assistant\\n\"\n",
56
+ " full_prompt = system_message + user_message + assistant_prefix\n",
57
+ " return full_prompt\n",
58
+ "\n",
59
+ "# تابع چت\n",
60
+ "def chat(user_input):\n",
61
+ " prompt = format_messages(user_input)\n",
62
+ " inputs = tokenizer(prompt, return_tensors=\"pt\").to(device)\n",
63
+ "\n",
64
+ " with torch.no_grad():\n",
65
+ " outputs = model.generate(\n",
66
+ " **inputs,\n",
67
+ " max_new_tokens=256,\n",
68
+ " do_sample=True,\n",
69
+ " temperature=0.7,\n",
70
+ " top_p=0.9,\n",
71
+ " pad_token_id=tokenizer.eos_token_id\n",
72
+ " )\n",
73
+ "\n",
74
+ " response = tokenizer.decode(outputs[0], skip_special_tokens=True)\n",
75
+ " \n",
76
+ " # پاسخ مدل بعد از آخرین <|im_start|>assistant\n",
77
+ " if \"<|im_start|>assistant\" in response:\n",
78
+ " response = response.split(\"<|im_start|>assistant\")[-1].strip()\n",
79
+ " if \"<|im_end|>\" in response:\n",
80
+ " response = response.split(\"<|im_end|>\")[0].strip()\n",
81
+ " \n",
82
+ " return response\n",
83
+ "\n",
84
+ "# رابط گرافیکی Gradio\n",
85
+ "interface = gr.Interface(\n",
86
+ " fn=chat,\n",
87
+ " inputs=gr.Textbox(lines=3, placeholder=\"پیام خود را وارد کنید...\"),\n",
88
+ " outputs=\"text\",\n",
89
+ " title=\"💬 SmolLM2 Chatbot\",\n",
90
+ " description=\"مدل سبک و مکالمه‌محور SmolLM2 از Hugging Face با فرمت قالب رسمی\"\n",
91
+ ")\n",
92
+ "\n",
93
+ "interface.launch()\n"
94
+ ]
95
+ },
96
+ {
97
+ "cell_type": "code",
98
+ "execution_count": null,
99
+ "id": "0f00bd7a-f925-4970-aeb6-96f1dcbad3c8",
100
+ "metadata": {},
101
+ "outputs": [],
102
+ "source": []
103
+ }
104
+ ],
105
+ "metadata": {
106
+ "kernelspec": {
107
+ "display_name": "Python [conda env:base] *",
108
+ "language": "python",
109
+ "name": "conda-base-py"
110
+ },
111
+ "language_info": {
112
+ "codemirror_mode": {
113
+ "name": "ipython",
114
+ "version": 3
115
+ },
116
+ "file_extension": ".py",
117
+ "mimetype": "text/x-python",
118
+ "name": "python",
119
+ "nbconvert_exporter": "python",
120
+ "pygments_lexer": "ipython3",
121
+ "version": "3.12.7"
122
+ }
123
+ },
124
+ "nbformat": 4,
125
+ "nbformat_minor": 5
126
+ }
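Reviewer note: the cell above assembles the `<|im_start|>` / `<|im_end|>` markers by hand. The SmolLM2 Instruct checkpoints ship a chat template, so the prompt can also be built with `tokenizer.apply_chat_template` (the same call the BitNet notebook uses); a short sketch, assuming the template is present in the tokenizer config and with an illustrative system message.

# Reviewer sketch: building the prompt from the tokenizer's own chat template.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM2-360M-Instruct")
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "سلام"},
]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)  # the <|im_start|>/<|im_end|> markers are inserted automatically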
opencv_test/Untitled6.ipynb ADDED
@@ -0,0 +1,297 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "id": "79c435c0-31ba-4c2f-81b3-90d8e670eb76",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "import numpy as np\n",
11
+ "import matplotlib.pyplot as plt\n",
12
+ "from matplotlib.animation import FuncAnimation\n",
13
+ "from mpl_toolkits.mplot3d import Axes3D, art3d\n",
14
+ "import random\n",
15
+ "\n",
16
+ "# Parameters\n",
17
+ "WORLD_SIZE = 100000\n",
18
+ "AIRCRAFT_COUNT = 100\n",
19
+ "RADAR_RANGE = 70000\n",
20
+ "RADAR_ALTITUDE_LIMIT = 20000 # max altitude radar covers in meters\n",
21
+ "SCAN_SPEED = 2.0 # degrees per frame\n",
22
+ "BEAM_WIDTH = 5.0 # degrees width of radar beam\n",
23
+ "TRACK_LENGTH = 20 # length of tail/track for aircrafts\n",
24
+ "MAX_ACCELERATION = 5 # m/s^2 max change in velocity per frame\n",
25
+ "\n",
26
+ "# Aircraft types with properties\n",
27
+ "AIRCRAFT_TYPES = {\n",
28
+ " 'commercial': {'rcs_range': (10, 20), 'color': 'cyan', 'size': 30},\n",
29
+ " 'military': {'rcs_range': (5, 12), 'color': 'red', 'size': 40},\n",
30
+ " 'drone': {'rcs_range': (1, 4), 'color': 'yellow', 'size': 20},\n",
31
+ " 'unknown': {'rcs_range': (0.5, 2), 'color': 'magenta', 'size': 25}\n",
32
+ "}\n",
33
+ "\n",
34
+ "# Event Class with motion\n",
35
+ "class MovingEvent3D:\n",
36
+ " def __init__(self, evt_type, center, radius, altitude, velocity):\n",
37
+ " self.type = evt_type\n",
38
+ " self.center = np.array(center, dtype=float)\n",
39
+ " self.radius = radius\n",
40
+ " self.altitude = altitude\n",
41
+ " self.velocity = np.array(velocity, dtype=float)\n",
42
+ " self.active = True\n",
43
+ " \n",
44
+ " def update(self):\n",
45
+ " self.center += self.velocity\n",
46
+ " # Bounce inside world bounds for x,y\n",
47
+ " for i in [0, 1]:\n",
48
+ " if self.center[i] < 0 or self.center[i] > WORLD_SIZE:\n",
49
+ " self.velocity[i] = -self.velocity[i]\n",
50
+ " self.center[i] = np.clip(self.center[i], 0, WORLD_SIZE)\n",
51
+ " # Bounce altitude inside radar altitude limit\n",
52
+ " if self.altitude < 0 or self.altitude > RADAR_ALTITUDE_LIMIT:\n",
53
+ " self.velocity[2] = -self.velocity[2]\n",
54
+ " self.altitude = np.clip(self.altitude, 0, RADAR_ALTITUDE_LIMIT)\n",
55
+ " # Random on/off toggle for event activity\n",
56
+ " if random.random() < 0.001:\n",
57
+ " self.active = not self.active\n",
58
+ "\n",
59
+ "def generate_moving_events_3d():\n",
60
+ " events = []\n",
61
+ " for _ in range(4):\n",
62
+ " evt_type = random.choice(['storm', 'no-fly-zone', 'jamming', 'interference'])\n",
63
+ " center = np.random.uniform(0, WORLD_SIZE, 2)\n",
64
+ " altitude = np.random.uniform(0, RADAR_ALTITUDE_LIMIT)\n",
65
+ " radius = {'storm': 15000, 'no-fly-zone': 10000, 'jamming': 8000, 'interference':12000}[evt_type]\n",
66
+ " velocity = np.random.uniform(-50, 50, 3)\n",
67
+ " events.append(MovingEvent3D(evt_type, center, radius, altitude, velocity))\n",
68
+ " return events\n",
69
+ "\n",
70
+ "world_events = generate_moving_events_3d()\n",
71
+ "\n",
72
+ "# Generate aircrafts with altitude, track history, type and variable velocity\n",
73
+ "def generate_aircraft_3d():\n",
74
+ " aircrafts = []\n",
75
+ " for i in range(AIRCRAFT_COUNT):\n",
76
+ " ac_type = random.choices(list(AIRCRAFT_TYPES.keys()), weights=[0.5,0.3,0.15,0.05])[0]\n",
77
+ " rcs_min, rcs_max = AIRCRAFT_TYPES[ac_type]['rcs_range']\n",
78
+ " ac = {\n",
79
+ " 'id': i,\n",
80
+ " 'type': ac_type,\n",
81
+ " 'position': np.array([*np.random.uniform(0, WORLD_SIZE, 2), np.random.uniform(0, RADAR_ALTITUDE_LIMIT)]),\n",
82
+ " 'velocity': np.random.uniform(-50, 50, 3),\n",
83
+ " 'rcs': random.uniform(rcs_min, rcs_max),\n",
84
+ " 'callsign': f\"{ac_type[:2].upper()}{i:03}\",\n",
85
+ " 'emergency': random.random() < 0.03,\n",
86
+ " 'track': [],\n",
87
+ " 'acceleration': np.zeros(3),\n",
88
+ " }\n",
89
+ " aircrafts.append(ac)\n",
90
+ " return aircrafts\n",
91
+ "\n",
92
+ "aircrafts = generate_aircraft_3d()\n",
93
+ "radar_angle = [0]\n",
94
+ "radar_pos = np.array([WORLD_SIZE/2, WORLD_SIZE/2, 0])\n",
95
+ "paused = [False]\n",
96
+ "\n",
97
+ "def is_event_active_3d(pos):\n",
98
+ " for evt in world_events:\n",
99
+ " if evt.active:\n",
100
+ " d_xy = np.linalg.norm(pos[:2] - evt.center)\n",
101
+ " dz = abs(pos[2] - evt.altitude)\n",
102
+ " if d_xy < evt.radius and dz < evt.radius / 2:\n",
103
+ " return evt.type\n",
104
+ " return None\n",
105
+ "\n",
106
+ "def detect_3d(ac, radar_pos):\n",
107
+ " delta = ac['position'] - radar_pos\n",
108
+ " rng = np.linalg.norm(delta)\n",
109
+ " if rng > RADAR_RANGE or ac['position'][2] > RADAR_ALTITUDE_LIMIT:\n",
110
+ " return False\n",
111
+ " bearing = (np.degrees(np.arctan2(delta[1], delta[0])) + 360) % 360\n",
112
+ " diff = abs((bearing - radar_angle[0] + 180) % 360 - 180)\n",
113
+ " if diff > BEAM_WIDTH / 2:\n",
114
+ " return False\n",
115
+ " evt = is_event_active_3d(ac['position'])\n",
116
+ " snr_val = 20 - 20*np.log10(rng + 1) + ac['rcs']\n",
117
+ " if evt == 'jamming':\n",
118
+ " snr_val -= 50\n",
119
+ " elif evt == 'storm':\n",
120
+ " snr_val -= 15\n",
121
+ " elif evt == 'interference':\n",
122
+ " snr_val -= 25\n",
123
+ " prob = 1 / (1 + np.exp(-(snr_val - 10)))\n",
124
+ " # Introduce random detection noise\n",
125
+ " noise = np.random.normal(0, 0.1)\n",
126
+ " return np.random.rand() < (prob + noise)\n",
127
+ "\n",
128
+ "# Setup plot\n",
129
+ "fig = plt.figure(figsize=(14, 10))\n",
130
+ "ax = fig.add_subplot(111, projection='3d')\n",
131
+ "ax.set_xlim(0, WORLD_SIZE)\n",
132
+ "ax.set_ylim(0, WORLD_SIZE)\n",
133
+ "ax.set_zlim(0, RADAR_ALTITUDE_LIMIT)\n",
134
+ "ax.set_facecolor('black')\n",
135
+ "\n",
136
+ "# Scatter for different types of aircrafts (dynamic update)\n",
137
+ "all_scatter = ax.scatter([], [], [], c=[], s=[], label='Aircraft')\n",
138
+ "detected_scatter = ax.scatter([], [], [], c='lime', s=60, label='Detected')\n",
139
+ "emergency_scatter = ax.scatter([], [], [], c='orange', s=80, marker='^', label='Emergency')\n",
140
+ "radar_sweep_line, = ax.plot([], [], [], c='cyan', linewidth=3, label='Radar Sweep')\n",
141
+ "\n",
142
+ "# Track lines for aircrafts\n",
143
+ "track_lines = [ax.plot([], [], [], c='white', alpha=0.3, linewidth=1)[0] for _ in range(AIRCRAFT_COUNT)]\n",
144
+ "\n",
145
+ "event_spheres = []\n",
146
+ "event_colors = {'storm':'blue', 'no-fly-zone':'yellow', 'jamming':'magenta', 'interference':'purple'}\n",
147
+ "\n",
148
+ "def plot_sphere(center, radius, color):\n",
149
+ " u = np.linspace(0, 2*np.pi, 20)\n",
150
+ " v = np.linspace(0, np.pi, 20)\n",
151
+ " x = center[0] + radius * np.outer(np.cos(u), np.sin(v))\n",
152
+ " y = center[1] + radius * np.outer(np.sin(u), np.sin(v))\n",
153
+ " z = center[2] + radius * np.outer(np.ones(np.size(u)), np.cos(v))\n",
154
+ " return ax.plot_surface(x, y, z, color=color, alpha=0.15)\n",
155
+ "\n",
156
+ "for evt in world_events:\n",
157
+ " sphere = plot_sphere(np.array([*evt.center, evt.altitude]), evt.radius, event_colors[evt.type])\n",
158
+ " event_spheres.append(sphere)\n",
159
+ "\n",
160
+ "# Radar range circle on ground\n",
161
+ "radar_circle = plt.Circle((radar_pos[0], radar_pos[1]), RADAR_RANGE, color='cyan', alpha=0.1)\n",
162
+ "ax.add_patch(radar_circle)\n",
163
+ "art3d.pathpatch_2d_to_3d(radar_circle, z=0, zdir=\"z\")\n",
164
+ "\n",
165
+ "def update(frame):\n",
166
+ " if paused[0]:\n",
167
+ " return\n",
168
+ " \n",
169
+ " # به‌روزرسانی زاویه رادار\n",
170
+ " radar_angle[0] = (radar_angle[0] + 1) % 360\n",
171
+ "\n",
172
+ " all_pos = []\n",
173
+ " all_colors = []\n",
174
+ " all_sizes = []\n",
175
+ "\n",
176
+ " detected_pos = []\n",
177
+ " emergency_pos = []\n",
178
+ "\n",
179
+ " for ac in aircrafts:\n",
180
+ " # محدود کردن سرعت\n",
181
+ " v_mag = np.linalg.norm(ac['velocity'])\n",
182
+ " max_speed = 250 # m/s\n",
183
+ " if v_mag > max_speed:\n",
184
+ " ac['velocity'] = (ac['velocity'] / v_mag) * max_speed\n",
185
+ " \n",
186
+ " # به‌روزرسانی موقعیت\n",
187
+ " ac['position'] += ac['velocity']\n",
188
+ " \n",
189
+ " # برخورد به دیواره‌های جهان\n",
190
+ " for i in [0, 1]:\n",
191
+ " if ac['position'][i] < 0 or ac['position'][i] > WORLD_SIZE:\n",
192
+ " ac['velocity'][i] = -ac['velocity'][i]\n",
193
+ " ac['position'][i] = np.clip(ac['position'][i], 0, WORLD_SIZE)\n",
194
+ " if ac['position'][2] < 0 or ac['position'][2] > RADAR_ALTITUDE_LIMIT:\n",
195
+ " ac['velocity'][2] = -ac['velocity'][2]\n",
196
+ " ac['position'][2] = np.clip(ac['position'][2], 0, RADAR_ALTITUDE_LIMIT)\n",
197
+ " \n",
198
+ " # ثبت رد حرکت\n",
199
+ " ac['track'].append(ac['position'].copy())\n",
200
+ " if len(ac['track']) > TRACK_LENGTH:\n",
201
+ " ac['track'].pop(0)\n",
202
+ " \n",
203
+ " all_pos.append(ac['position'])\n",
204
+ " all_colors.append(AIRCRAFT_TYPES[ac['type']]['color'])\n",
205
+ " all_sizes.append(AIRCRAFT_TYPES[ac['type']]['size'])\n",
206
+ " \n",
207
+ " if detect_3d(ac, radar_pos):\n",
208
+ " detected_pos.append(ac['position'])\n",
209
+ " if ac['emergency']:\n",
210
+ " emergency_pos.append(ac['position'])\n",
211
+ "\n",
212
+ " # تبدیل به np.array\n",
213
+ " all_pos = np.array(all_pos)\n",
214
+ " detected_pos = np.array(detected_pos)\n",
215
+ " emergency_pos = np.array(emergency_pos)\n",
216
+ "\n",
217
+ " # آپدیت scatter کل هواپیماها\n",
218
+ " if len(all_pos) > 0:\n",
219
+ " all_scatter._offsets3d = (all_pos[:,0], all_pos[:,1], all_pos[:,2])\n",
220
+ " all_scatter.set_color(all_colors)\n",
221
+ " all_scatter.set_sizes(all_sizes)\n",
222
+ " else:\n",
223
+ " all_scatter._offsets3d = ([], [], [])\n",
224
+ " all_scatter.set_color([])\n",
225
+ " all_scatter.set_sizes([])\n",
226
+ "\n",
227
+ " # آپدیت scatter هواپیماهای تشخیص داده شده\n",
228
+ " if len(detected_pos) > 0:\n",
229
+ " detected_scatter._offsets3d = (detected_pos[:,0], detected_pos[:,1], detected_pos[:,2])\n",
230
+ " detected_scatter.set_sizes([60]*len(detected_pos))\n",
231
+ " else:\n",
232
+ " detected_scatter._offsets3d = ([], [], [])\n",
233
+ " detected_scatter.set_sizes([])\n",
234
+ "\n",
235
+ " # آپدیت scatter هواپیماهای اضطراری\n",
236
+ " if len(emergency_pos) > 0:\n",
237
+ " emergency_scatter._offsets3d = (emergency_pos[:,0], emergency_pos[:,1], emergency_pos[:,2])\n",
238
+ " emergency_scatter.set_sizes([80]*len(emergency_pos))\n",
239
+ " else:\n",
240
+ " emergency_scatter._offsets3d = ([], [], [])\n",
241
+ " emergency_scatter.set_sizes([])\n",
242
+ "\n",
243
+ " # به‌روزرسانی خطوط رد حرکت\n",
244
+ " for i, ac in enumerate(aircrafts):\n",
245
+ " if len(ac['track']) >= 2:\n",
246
+ " track_arr = np.array(ac['track'])\n",
247
+ " track_lines[i].set_data(track_arr[:,0], track_arr[:,1])\n",
248
+ " track_lines[i].set_3d_properties(track_arr[:,2])\n",
249
+ " else:\n",
250
+ " track_lines[i].set_data([], [])\n",
251
+ " track_lines[i].set_3d_properties([])\n",
252
+ "\n",
253
+ " # به‌روزرسانی خط اسکن رادار\n",
254
+ " angle_rad = np.radians(radar_angle[0])\n",
255
+ " x = [radar_pos[0], radar_pos[0] + RADAR_RANGE * np.cos(angle_rad)]\n",
256
+ " y = [radar_pos[1], radar_pos[1] + RADAR_RANGE * np.sin(angle_rad)]\n",
257
+ " z = [0, 0]\n",
258
+ " radar_sweep_line.set_data(x, y)\n",
259
+ " radar_sweep_line.set_3d_properties(z)\n",
260
+ "\n",
261
+ " ax.set_title(f\"3D Radar Simulation - Scan Angle: {radar_angle[0]:.1f}°\")\n",
262
+ "\n",
263
+ " \n",
264
+ "def on_key(event):\n",
265
+ " if event.key == ' ':\n",
266
+ " paused[0] = not paused[0]\n",
267
+ "\n",
268
+ "fig.canvas.mpl_connect('key_press_event', on_key)\n",
269
+ "\n",
270
+ "ani = FuncAnimation(fig, update, interval=50)\n",
271
+ "plt.legend(loc='upper right')\n",
272
+ "plt.show()\n"
273
+ ]
274
+ }
275
+ ],
276
+ "metadata": {
277
+ "kernelspec": {
278
+ "display_name": "Python [conda env:base] *",
279
+ "language": "python",
280
+ "name": "conda-base-py"
281
+ },
282
+ "language_info": {
283
+ "codemirror_mode": {
284
+ "name": "ipython",
285
+ "version": 3
286
+ },
287
+ "file_extension": ".py",
288
+ "mimetype": "text/x-python",
289
+ "name": "python",
290
+ "nbconvert_exporter": "python",
291
+ "pygments_lexer": "ipython3",
292
+ "version": "3.12.7"
293
+ }
294
+ },
295
+ "nbformat": 4,
296
+ "nbformat_minor": 5
297
+ }
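Reviewer note: the detection rule in `detect_3d` above reduces to a log-range SNR term plus the target's RCS, minus an event penalty, squashed through a logistic function. Isolated as a standalone helper (constants mirror the cell; the example ranges and RCS values are illustrative):

# Reviewer sketch of the probability model used by detect_3d().
import numpy as np

def detection_probability(rng_m, rcs, event=None):
    snr = 20 - 20 * np.log10(rng_m + 1) + rcs            # log-range falloff plus RCS
    snr -= {"jamming": 50, "storm": 15, "interference": 25}.get(event, 0)
    return 1.0 / (1.0 + np.exp(-(snr - 10)))             # logistic detection probability

print(detection_probability(30_000, 15))             # commercial-sized target, clear sky
print(detection_probability(30_000, 15, "storm"))    # same target inside a storm cell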
opencv_test/Untitled7.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
opencv_test/object_memory.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d8d7aa882c8afb95efe250e6849296fb22ca1695eff05da8a168ac658cfb750a
3
+ size 692
opencv_test/untitled.py ADDED
File without changes
opencv_test/untitled1.py ADDED
@@ -0,0 +1,69 @@
1
+ import cv2
2
+ import numpy as np
3
+
4
+ # Use the KNN-based algorithm for background subtraction
5
+ back_sub = cv2.createBackgroundSubtractorKNN(history=500, dist2Threshold=400, detectShadows=True)
6
+
7
+ # Helper to compute the centroid of a bounding box
8
+ def get_centroid(x, y, w, h):
9
+ return (int(x + w / 2), int(y + h / 2))
10
+
11
+ # Capture video from the camera
12
+ cap = cv2.VideoCapture(0)
13
+
14
+ # Kalman filter for more accurate tracking
15
+ kalman = cv2.KalmanFilter(4, 2)
16
+ kalman.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)
17
+ kalman.transitionMatrix = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)
18
+ kalman.processNoiseCov = np.array([[1e-5, 0, 0, 0], [0, 1e-5, 0, 0], [0, 0, 1e-5, 0], [0, 0, 0, 1e-5]], np.float32)
19
+
20
+ while True:
21
+ ret, frame = cap.read()
22
+ if not ret:
23
+ break
24
+
25
+ # Convert the frame to grayscale
26
+ gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
27
+
28
+ # Get the foreground mask of moving objects
29
+ fg_mask = back_sub.apply(frame)
30
+
31
+ # Noise removal with GaussianBlur and medianBlur
32
+ fg_mask = cv2.GaussianBlur(fg_mask, (5, 5), 0) # Gaussian blur to suppress noise
33
+ fg_mask = cv2.medianBlur(fg_mask, 5) # median blur to remove speckle noise
34
+
35
+ # Morphological operations to remove residual noise and shadows
36
+ kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7)) # larger structuring element
37
+ fg_mask = cv2.morphologyEx(fg_mask, cv2.MORPH_CLOSE, kernel, iterations=2) # CLOSE to fill small holes
38
+ fg_mask = cv2.dilate(fg_mask, kernel, iterations=3) # grow the mask
39
+
40
+ # Find contours
41
+ contours, _ = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
42
+
43
+ for cnt in contours:
44
+ area = cv2.contourArea(cnt)
45
+ if area > 500: # only consider objects larger than this
46
+ x, y, w, h = cv2.boundingRect(cnt)
47
+ centroid = get_centroid(x, y, w, h)
48
+
49
+ # Use the Kalman filter to predict the object's position
50
+ kalman.correct(np.array([np.float32(centroid[0]), np.float32(centroid[1])]))
51
+ prediction = kalman.predict()
52
+
53
+ predicted_x, predicted_y = int(prediction[0]), int(prediction[1])
54
+
55
+ # Draw the bounding box and the predicted position
56
+ cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
57
+ cv2.circle(frame, centroid, 4, (0, 0, 255), -1)
58
+ cv2.putText(frame, "Moving Object", (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
59
+ cv2.circle(frame, (predicted_x, predicted_y), 4, (255, 0, 0), -1) # predicted position
60
+
61
+ # Show the frame
62
+ cv2.imshow('Optimized Object Tracking', frame)
63
+
64
+ # Exit with the ESC key
65
+ if cv2.waitKey(1) & 0xFF == 27:
66
+ break
67
+
68
+ cap.release()
69
+ cv2.destroyAllWindows()
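Reviewer note on the Kalman usage above: the loop calls `kalman.correct()` and then `kalman.predict()` in the same iteration, so the drawn point is the post-measurement forecast for the next frame. The more conventional per-frame order is predict first, then correct with the new centroid; a minimal sketch of that convention for the same 4-state / 2-measurement filter (the helper name and sample centroid are illustrative).

# Reviewer sketch: predict-then-correct stepping for a constant-velocity tracker.
import numpy as np
import cv2

kalman = cv2.KalmanFilter(4, 2)
kalman.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)
kalman.transitionMatrix = np.array([[1, 0, 1, 0], [0, 1, 0, 1],
                                    [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)
kalman.processNoiseCov = np.eye(4, dtype=np.float32) * 1e-5

def track_step(centroid):
    prediction = kalman.predict()                        # a-priori estimate for this frame
    measurement = np.array([[centroid[0]], [centroid[1]]], np.float32)
    kalman.correct(measurement)                          # fold in the observed centroid
    return int(prediction[0, 0]), int(prediction[1, 0])

print(track_step((320, 240)))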
opencv_test/untitled10.py ADDED
@@ -0,0 +1,153 @@
1
+ import cv2
2
+ import numpy as np
3
+ import time
4
+ from sklearn.neighbors import KNeighborsClassifier
5
+ from collections import defaultdict, deque
6
+
7
+ # Create background subtractor for motion detection
8
+ back_sub = cv2.createBackgroundSubtractorKNN(history=500, dist2Threshold=400, detectShadows=True)
9
+ cap = cv2.VideoCapture(0)
10
+
11
+ # Store object traces
12
+ object_traces = defaultdict(lambda: deque(maxlen=30)) # Last 30 points of each object
13
+ object_last_seen = {}
14
+ object_id_counter = 0
15
+
16
+ # For real-time learning
17
+ knn = KNeighborsClassifier(n_neighbors=3)
18
+ features_set = []
19
+ labels_set = []
20
+ frame_count = 0
21
+ learning_interval = 30
22
+
23
+ # Timer for data collection
24
+ start_time = time.time()
25
+ learning_time_limit = 60 # 1 minute for data collection
26
+
27
+ # Variable to avoid predicting before training
28
+ is_trained = False
29
+
30
+ def apply_noise_reduction(mask):
31
+ kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
32
+ mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=2)
33
+ mask = cv2.dilate(mask, kernel, iterations=1)
34
+ return mask
35
+
36
+ def get_centroid(x, y, w, h):
37
+ return (int(x + w / 2), int(y + h / 2))
38
+
39
+ def calculate_direction(trace):
40
+ if len(trace) < 2:
41
+ return "-"
42
+ dx = trace[-1][0] - trace[0][0]
43
+ dy = trace[-1][1] - trace[0][1]
44
+ if abs(dx) > abs(dy):
45
+ return "Left" if dx < 0 else "Right"
46
+ else:
47
+ return "Up" if dy < 0 else "Down"
48
+
49
+ def calculate_speed(trace, duration):
50
+ if len(trace) < 2 or duration == 0:
51
+ return 0
52
+ dist = np.linalg.norm(np.array(trace[-1]) - np.array(trace[0]))
53
+ return dist / duration
54
+
55
+ def count_direction_changes(trace):
56
+ changes = 0
57
+ for i in range(2, len(trace)):
58
+ dx1 = trace[i-1][0] - trace[i-2][0]
59
+ dx2 = trace[i][0] - trace[i-1][0]
60
+ if dx1 * dx2 < 0: # Horizontal direction change
61
+ changes += 1
62
+ return changes
63
+
64
+ while True:
65
+ ret, frame = cap.read()
66
+ if not ret:
67
+ break
68
+
69
+ gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
70
+ fg_mask = back_sub.apply(frame)
71
+ fg_mask = apply_noise_reduction(fg_mask)
72
+
73
+ contours, _ = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
74
+
75
+ current_ids = []
76
+ predicted = 1 # Default prediction value (if no prediction is made)
77
+ for cnt in contours:
78
+ area = cv2.contourArea(cnt)
79
+ if area < 150:
80
+ continue
81
+
82
+ x, y, w, h = cv2.boundingRect(cnt)
83
+ centroid = get_centroid(x, y, w, h)
84
+
85
+ # Identify or create a new ID for the object
86
+ matched_id = None
87
+ for oid, trace in object_traces.items():
88
+ if np.linalg.norm(np.array(trace[-1]) - np.array(centroid)) < 50:
89
+ matched_id = oid
90
+ break
91
+
92
+ if matched_id is None:
93
+ matched_id = object_id_counter
94
+ object_id_counter += 1
95
+
96
+ object_traces[matched_id].append(centroid)
97
+ object_last_seen[matched_id] = time.time()
98
+ current_ids.append(matched_id)
99
+
100
+ trace = object_traces[matched_id]
101
+ duration = time.time() - object_last_seen[matched_id] + 0.001
102
+ speed = calculate_speed(trace, duration)
103
+ direction = calculate_direction(trace)
104
+ direction_changes = count_direction_changes(trace)
105
+ total_move = sum(np.linalg.norm(np.array(trace[i]) - np.array(trace[i-1])) for i in range(1, len(trace)))
106
+
107
+ # Feature for the model
108
+ feature = [w, h, centroid[0], centroid[1], area, speed, direction_changes]
109
+ label = 1 # Default label: Normal
110
+
111
+ # Simple automatic labeling:
112
+ if speed > 100 or direction_changes > 4:
113
+ label = 2 # Suspicious
114
+
115
+ features_set.append(feature)
116
+ labels_set.append(label)
117
+
118
+ # Train the model only after enough data is collected
119
+ if time.time() - start_time < learning_time_limit:
120
+ # Still in data collection phase
121
+ continue
122
+ elif not is_trained: # If the model hasn't been trained yet
123
+ if len(features_set) > 10:
124
+ knn.fit(features_set, labels_set) # Train the model
125
+ is_trained = True # Model is trained
126
+ print("Model updated.")
127
+
128
+ # Prediction only after training
129
+ if is_trained:
130
+ predicted = knn.predict([feature])[0]
131
+
132
+ # Draw information on the frame
133
+ cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0) if label == 1 else (0, 0, 255), 2)
134
+ cv2.circle(frame, centroid, 4, (255, 255, 255), -1)
135
+ cv2.putText(frame, f"ID: {matched_id} | Direction: {direction} | Speed: {int(speed)}", (x, y - 25),
136
+ cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 1)
137
+ cv2.putText(frame, f"Behavior: {'Normal' if predicted == 1 else 'Suspicious'}", (x, y - 5),
138
+ cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)
139
+
140
+ frame_count += 1
141
+
142
+ # Remove old object IDs
143
+ for oid in list(object_last_seen):
144
+ if time.time() - object_last_seen[oid] > 2:
145
+ object_traces.pop(oid, None)
146
+ object_last_seen.pop(oid, None)
147
+
148
+ cv2.imshow("Behavioral Intelligence", frame)
149
+ if cv2.waitKey(1) & 0xFF == 27:
150
+ break
151
+
152
+ cap.release()
153
+ cv2.destroyAllWindows()
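Note: in the loop above, object_last_seen[matched_id] is refreshed to the current time immediately before duration is computed, so duration is always about 0.001 s and the resulting speed value is inflated. A hedged sketch of one way to get a pixels-per-second speed; the last_update dict and update_speed helper are hypothetical and not in the file:

import time
import numpy as np

last_update = {}  # hypothetical: object id -> timestamp of the previous sighting

def update_speed(obj_id, trace):
    # Speed in pixels per second between the last two sightings of obj_id.
    now = time.time()
    prev_time = last_update.get(obj_id)
    last_update[obj_id] = now
    if prev_time is None or len(trace) < 2:
        return 0.0
    step = np.linalg.norm(np.array(trace[-1]) - np.array(trace[-2]))
    return step / max(now - prev_time, 1e-3)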
opencv_test/untitled11.py ADDED
@@ -0,0 +1,147 @@
1
+ import cv2
2
+ import numpy as np
3
+ import time
4
+ from sklearn.neighbors import KNeighborsClassifier
5
+ from collections import defaultdict, deque
6
+
7
+ # Create background subtractor for motion detection
8
+ back_sub = cv2.createBackgroundSubtractorKNN(history=500, dist2Threshold=400, detectShadows=True)
9
+ cap = cv2.VideoCapture(0)
10
+
11
+ # Store object traces
12
+ object_traces = defaultdict(lambda: deque(maxlen=30)) # Last 30 points of each object
13
+ object_last_seen = {}
14
+ object_id_counter = 0
15
+
16
+ # For real-time learning
17
+ knn = KNeighborsClassifier(n_neighbors=3)
18
+ features_set = []
19
+ labels_set = []
20
+
21
+ # Timer for real-time learning and training interval
22
+ start_time = time.time()
23
+ training_interval = 5 # 5 seconds for real-time training
24
+
25
+ # Variable to avoid predicting before training
26
+ is_trained = False
27
+
28
+ def apply_noise_reduction(mask):
29
+ kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
30
+ mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=2)
31
+ mask = cv2.dilate(mask, kernel, iterations=1)
32
+ return mask
33
+
34
+ def get_centroid(x, y, w, h):
35
+ return (int(x + w / 2), int(y + h / 2))
36
+
37
+ def calculate_direction(trace):
38
+ if len(trace) < 2:
39
+ return "-"
40
+ dx = trace[-1][0] - trace[0][0]
41
+ dy = trace[-1][1] - trace[0][1]
42
+ if abs(dx) > abs(dy):
43
+ return "Left" if dx < 0 else "Right"
44
+ else:
45
+ return "Up" if dy < 0 else "Down"
46
+
47
+ def calculate_speed(trace, duration):
48
+ if len(trace) < 2 or duration == 0:
49
+ return 0
50
+ dist = np.linalg.norm(np.array(trace[-1]) - np.array(trace[0]))
51
+ return dist / duration
52
+
53
+ def count_direction_changes(trace):
54
+ changes = 0
55
+ for i in range(2, len(trace)):
56
+ dx1 = trace[i-1][0] - trace[i-2][0]
57
+ dx2 = trace[i][0] - trace[i-1][0]
58
+ if dx1 * dx2 < 0: # Horizontal direction change
59
+ changes += 1
60
+ return changes
61
+
62
+ while True:
63
+ ret, frame = cap.read()
64
+ if not ret:
65
+ break
66
+
67
+ gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
68
+ fg_mask = back_sub.apply(frame)
69
+ fg_mask = apply_noise_reduction(fg_mask)
70
+
71
+ contours, _ = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
72
+
73
+ current_ids = []
74
+ predicted = 1 # Default prediction value (if no prediction is made)
75
+ for cnt in contours:
76
+ area = cv2.contourArea(cnt)
77
+ if area < 150:
78
+ continue
79
+
80
+ x, y, w, h = cv2.boundingRect(cnt)
81
+ centroid = get_centroid(x, y, w, h)
82
+
83
+ # Identify or create a new ID for the object
84
+ matched_id = None
85
+ for oid, trace in object_traces.items():
86
+ if np.linalg.norm(np.array(trace[-1]) - np.array(centroid)) < 50:
87
+ matched_id = oid
88
+ break
89
+
90
+ if matched_id is None:
91
+ matched_id = object_id_counter
92
+ object_id_counter += 1
93
+
94
+ object_traces[matched_id].append(centroid)
95
+ object_last_seen[matched_id] = time.time()
96
+ current_ids.append(matched_id)
97
+
98
+ trace = object_traces[matched_id]
99
+ duration = time.time() - object_last_seen[matched_id] + 0.001
100
+ speed = calculate_speed(trace, duration)
101
+ direction = calculate_direction(trace)
102
+ direction_changes = count_direction_changes(trace)
103
+ total_move = sum(np.linalg.norm(np.array(trace[i]) - np.array(trace[i-1])) for i in range(1, len(trace)))
104
+
105
+ # Feature for the model
106
+ feature = [w, h, centroid[0], centroid[1], area, speed, direction_changes]
107
+ label = 1 # Default label: Normal
108
+
109
+ # Simple automatic labeling:
110
+ if speed > 100 or direction_changes > 4:
111
+ label = 2 # Suspicious
112
+
113
+ features_set.append(feature)
114
+ labels_set.append(label)
115
+
116
+ # Retrain the model every 5 seconds
117
+ if time.time() - start_time > training_interval:
118
+ if len(features_set) > 10:
119
+ knn.fit(features_set, labels_set) # Train the model
120
+ is_trained = True # Model is trained
121
+ print("Model updated.")
122
+ start_time = time.time() # Reset the timer after retraining
123
+
124
+ # Prediction only after training
125
+ if is_trained:
126
+ predicted = knn.predict([feature])[0]
127
+
128
+ # Draw information on the frame
129
+ cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0) if label == 1 else (0, 0, 255), 2)
130
+ cv2.circle(frame, centroid, 4, (255, 255, 255), -1)
131
+ cv2.putText(frame, f"ID: {matched_id} | Direction: {direction} | Speed: {int(speed)}", (x, y - 25),
132
+ cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 1)
133
+ cv2.putText(frame, f"Behavior: {'Normal' if predicted == 1 else 'Suspicious'}", (x, y - 5),
134
+ cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)
135
+
136
+ # Remove old object IDs
137
+ for oid in list(object_last_seen):
138
+ if time.time() - object_last_seen[oid] > 2:
139
+ object_traces.pop(oid, None)
140
+ object_last_seen.pop(oid, None)
141
+
142
+ cv2.imshow("Behavioral Intelligence", frame)
143
+ if cv2.waitKey(1) & 0xFF == 27:
144
+ break
145
+
146
+ cap.release()
147
+ cv2.destroyAllWindows()
opencv_test/untitled12.py ADDED
@@ -0,0 +1,157 @@
1
+ import cv2
2
+ import numpy as np
3
+ import time
4
+ from sklearn.neighbors import KNeighborsClassifier
5
+ from collections import defaultdict, deque
6
+
7
+ # Create background subtractor for motion detection
8
+ back_sub = cv2.createBackgroundSubtractorKNN(history=500, dist2Threshold=400, detectShadows=True)
9
+ cap = cv2.VideoCapture(0)
10
+
11
+ # Store object traces
12
+ object_traces = defaultdict(lambda: deque(maxlen=30)) # Last 30 points of each object
13
+ object_last_seen = {}
14
+ object_id_counter = 0
15
+
16
+ # For real-time learning
17
+ knn = KNeighborsClassifier(n_neighbors=3)
18
+ features_set = []
19
+ labels_set = []
20
+
21
+ # Timer for real-time learning and training interval
22
+ start_time = time.time()
23
+ training_interval = 5 # 5 seconds for real-time training
24
+
25
+ # Variable to avoid predicting before training
26
+ is_trained = False
27
+
28
+ # Memory storage for past predictions and features
29
+ memory = defaultdict(list) # Store memory of features and predictions for each object
30
+
31
+ def apply_noise_reduction(mask):
32
+ kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
33
+ mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=2)
34
+ mask = cv2.dilate(mask, kernel, iterations=1)
35
+ return mask
36
+
37
+ def get_centroid(x, y, w, h):
38
+ return (int(x + w / 2), int(y + h / 2))
39
+
40
+ def calculate_direction(trace):
41
+ if len(trace) < 2:
42
+ return "-"
43
+ dx = trace[-1][0] - trace[0][0]
44
+ dy = trace[-1][1] - trace[0][1]
45
+ if abs(dx) > abs(dy):
46
+ return "Left" if dx < 0 else "Right"
47
+ else:
48
+ return "Up" if dy < 0 else "Down"
49
+
50
+ def calculate_speed(trace, duration):
51
+ if len(trace) < 2 or duration == 0:
52
+ return 0
53
+ dist = np.linalg.norm(np.array(trace[-1]) - np.array(trace[0]))
54
+ return dist / duration
55
+
56
+ def count_direction_changes(trace):
57
+ changes = 0
58
+ for i in range(2, len(trace)):
59
+ dx1 = trace[i-1][0] - trace[i-2][0]
60
+ dx2 = trace[i][0] - trace[i-1][0]
61
+ if dx1 * dx2 < 0: # Horizontal direction change
62
+ changes += 1
63
+ return changes
64
+
65
+ while True:
66
+ ret, frame = cap.read()
67
+ if not ret:
68
+ break
69
+
70
+ gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
71
+ fg_mask = back_sub.apply(frame)
72
+ fg_mask = apply_noise_reduction(fg_mask)
73
+
74
+ contours, _ = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
75
+
76
+ current_ids = []
77
+ predicted = 1 # Default prediction value (if no prediction is made)
78
+ for cnt in contours:
79
+ area = cv2.contourArea(cnt)
80
+ if area < 150:
81
+ continue
82
+
83
+ x, y, w, h = cv2.boundingRect(cnt)
84
+ centroid = get_centroid(x, y, w, h)
85
+
86
+ # Identify or create a new ID for the object
87
+ matched_id = None
88
+ for oid, trace in object_traces.items():
89
+ if np.linalg.norm(np.array(trace[-1]) - np.array(centroid)) < 50:
90
+ matched_id = oid
91
+ break
92
+
93
+ if matched_id is None:
94
+ matched_id = object_id_counter
95
+ object_id_counter += 1
96
+
97
+ object_traces[matched_id].append(centroid)
98
+ object_last_seen[matched_id] = time.time()
99
+ current_ids.append(matched_id)
100
+
101
+ trace = object_traces[matched_id]
102
+ duration = time.time() - object_last_seen[matched_id] + 0.001
103
+ speed = calculate_speed(trace, duration)
104
+ direction = calculate_direction(trace)
105
+ direction_changes = count_direction_changes(trace)
106
+ total_move = sum(np.linalg.norm(np.array(trace[i]) - np.array(trace[i-1])) for i in range(1, len(trace)))
107
+
108
+ # Feature for the model
109
+ feature = [w, h, centroid[0], centroid[1], area, speed, direction_changes]
110
+ label = 1 # Default label: Normal
111
+
112
+ # Simple automatic labeling:
113
+ if speed > 100 or direction_changes > 4:
114
+ label = 2 # Suspicious
115
+
116
+ features_set.append(feature)
117
+ labels_set.append(label)
118
+
119
+ # Store features and predictions in memory
120
+ memory[matched_id].append({
121
+ 'features': feature,
122
+ 'prediction': label
123
+ })
124
+
125
+ # Retrain the model every 5 seconds
126
+ if time.time() - start_time > training_interval:
127
+ if len(features_set) > 10:
128
+ knn.fit(features_set, labels_set) # Train the model
129
+ is_trained = True # Model is trained
130
+ print("Model updated.")
131
+ start_time = time.time() # Reset the timer after retraining
132
+
133
+ # Prediction only after training
134
+ if is_trained:
135
+ predicted = knn.predict([feature])[0]
136
+
137
+ # Draw information on the frame
138
+ cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0) if label == 1 else (0, 0, 255), 2)
139
+ cv2.circle(frame, centroid, 4, (255, 255, 255), -1)
140
+ cv2.putText(frame, f"ID: {matched_id} | Direction: {direction} | Speed: {int(speed)}", (x, y - 25),
141
+ cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 1)
142
+ cv2.putText(frame, f"Behavior: {'Normal' if predicted == 1 else 'Suspicious'}", (x, y - 5),
143
+ cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)
144
+
145
+ # Remove old object IDs from memory
146
+ for oid in list(object_last_seen):
147
+ if time.time() - object_last_seen[oid] > 2:
148
+ object_traces.pop(oid, None)
149
+ object_last_seen.pop(oid, None)
150
+ memory.pop(oid, None) # Remove from memory as well
151
+
152
+ cv2.imshow("Behavioral Intelligence", frame)
153
+ if cv2.waitKey(1) & 0xFF == 27:
154
+ break
155
+
156
+ cap.release()
157
+ cv2.destroyAllWindows()
opencv_test/untitled13.py ADDED
@@ -0,0 +1,172 @@
1
+ import cv2
2
+ import numpy as np
3
+ import time
4
+ from sklearn.neighbors import KNeighborsClassifier
5
+ from collections import defaultdict, deque
6
+
7
+ # Create background subtractor for motion detection
8
+ back_sub = cv2.createBackgroundSubtractorKNN(history=500, dist2Threshold=400, detectShadows=True)
9
+ cap = cv2.VideoCapture(0)
10
+
11
+ # Store object traces
12
+ object_traces = defaultdict(lambda: deque(maxlen=30)) # Last 30 points of each object
13
+ object_last_seen = {}
14
+ object_id_counter = 0
15
+
16
+ # For real-time learning
17
+ knn = KNeighborsClassifier(n_neighbors=3)
18
+ features_set = []
19
+ labels_set = []
20
+
21
+ # Timer for real-time learning and training interval
22
+ start_time = time.time()
23
+ training_interval = 5 # 5 seconds for real-time training
24
+
25
+ # Variable to avoid predicting before training
26
+ is_trained = False
27
+
28
+ # Memory storage for past predictions and features (long-term memory)
29
+ long_term_memory = defaultdict(list) # Store memory of features and predictions for each object
30
+
31
+ # Function to apply noise reduction (post-processing)
32
+ def apply_noise_reduction(mask):
33
+ kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
34
+ mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=2)
35
+ mask = cv2.dilate(mask, kernel, iterations=1)
36
+ return mask
37
+
38
+ def get_centroid(x, y, w, h):
39
+ return (int(x + w / 2), int(y + h / 2))
40
+
41
+ # Function to calculate the direction of movement
42
+ def calculate_direction(trace):
43
+ if len(trace) < 2:
44
+ return "-"
45
+ dx = trace[-1][0] - trace[0][0]
46
+ dy = trace[-1][1] - trace[0][1]
47
+ if abs(dx) > abs(dy):
48
+ return "Left" if dx < 0 else "Right"
49
+ else:
50
+ return "Up" if dy < 0 else "Down"
51
+
52
+ # Function to calculate speed based on trace distance and duration
53
+ def calculate_speed(trace, duration):
54
+ if len(trace) < 2 or duration == 0:
55
+ return 0
56
+ dist = np.linalg.norm(np.array(trace[-1]) - np.array(trace[0]))
57
+ return dist / duration
58
+
59
+ # Function to count the number of direction changes (for complexity detection)
60
+ def count_direction_changes(trace):
61
+ changes = 0
62
+ for i in range(2, len(trace)):
63
+ dx1 = trace[i-1][0] - trace[i-2][0]
64
+ dx2 = trace[i][0] - trace[i-1][0]
65
+ if dx1 * dx2 < 0: # Horizontal direction change
66
+ changes += 1
67
+ return changes
68
+
69
+ # Function to check if an object is old (not detected for a while)
70
+ def is_old_object(object_id, threshold_time=10):
71
+ # Threshold time is how long since last seen for the object to be considered "old"
72
+ if time.time() - object_last_seen[object_id] > threshold_time:
73
+ return True
74
+ return False
75
+
76
+ while True:
77
+ ret, frame = cap.read()
78
+ if not ret:
79
+ break
80
+
81
+ gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
82
+ fg_mask = back_sub.apply(frame)
83
+ fg_mask = apply_noise_reduction(fg_mask)
84
+
85
+ contours, _ = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
86
+
87
+ current_ids = []
88
+ predicted = 1 # Default prediction value (if no prediction is made)
89
+ for cnt in contours:
90
+ area = cv2.contourArea(cnt)
91
+ if area < 150:
92
+ continue
93
+
94
+ x, y, w, h = cv2.boundingRect(cnt)
95
+ centroid = get_centroid(x, y, w, h)
96
+
97
+ # Identify or create a new ID for the object
98
+ matched_id = None
99
+ for oid, trace in object_traces.items():
100
+ if np.linalg.norm(np.array(trace[-1]) - np.array(centroid)) < 50:
101
+ matched_id = oid
102
+ break
103
+
104
+ if matched_id is None:
105
+ matched_id = object_id_counter
106
+ object_id_counter += 1
107
+
108
+ object_traces[matched_id].append(centroid)
109
+ object_last_seen[matched_id] = time.time()
110
+ current_ids.append(matched_id)
111
+
112
+ trace = object_traces[matched_id]
113
+ duration = time.time() - object_last_seen[matched_id] + 0.001
114
+ speed = calculate_speed(trace, duration)
115
+ direction = calculate_direction(trace)
116
+ direction_changes = count_direction_changes(trace)
117
+ total_move = sum(np.linalg.norm(np.array(trace[i]) - np.array(trace[i-1])) for i in range(1, len(trace)))
118
+
119
+ # Feature for the model
120
+ feature = [w, h, centroid[0], centroid[1], area, speed, direction_changes]
121
+ label = 1 # Default label: Normal
122
+
123
+ # Simple automatic labeling based on speed and direction changes:
124
+ if speed > 100 or direction_changes > 4:
125
+ label = 2 # Suspicious
126
+
127
+ features_set.append(feature)
128
+ labels_set.append(label)
129
+
130
+ # Store features and predictions in long-term memory
131
+ long_term_memory[matched_id].append({
132
+ 'features': feature,
133
+ 'prediction': label
134
+ })
135
+
136
+ # Retrain the model every 5 seconds
137
+ if time.time() - start_time > training_interval:
138
+ if len(features_set) > 10:
139
+ knn.fit(features_set, labels_set) # Train the model
140
+ is_trained = True # Model is trained
141
+ print("Model updated.")
142
+ start_time = time.time() # Reset the timer after retraining
143
+
144
+ # Prediction only after training
145
+ if is_trained:
146
+ predicted = knn.predict([feature])[0]
147
+
148
+ # Draw information on the frame
149
+ cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0) if label == 1 else (0, 0, 255), 2)
150
+ cv2.circle(frame, centroid, 4, (255, 255, 255), -1)
151
+ cv2.putText(frame, f"ID: {matched_id} | Direction: {direction} | Speed: {int(speed)}", (x, y - 25),
152
+ cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 1)
153
+ cv2.putText(frame, f"Behavior: {'Normal' if predicted == 1 else 'Suspicious'}", (x, y - 5),
154
+ cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)
155
+
156
+ # Check if the object is old and mark it
157
+ if is_old_object(matched_id):
158
+ cv2.putText(frame, f"Old Object", (x, y - 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
159
+
160
+ # Remove old object IDs from memory
161
+ for oid in list(object_last_seen):
162
+ if time.time() - object_last_seen[oid] > 2:
163
+ object_traces.pop(oid, None)
164
+ object_last_seen.pop(oid, None)
165
+ long_term_memory.pop(oid, None) # Remove from long-term memory as well
166
+
167
+ cv2.imshow("Behavioral Intelligence", frame)
168
+ if cv2.waitKey(1) & 0xFF == 27:
169
+ break
170
+
171
+ cap.release()
172
+ cv2.destroyAllWindows()
opencv_test/untitled14.py ADDED
@@ -0,0 +1,114 @@
1
+ import cv2
2
+ import numpy as np
3
+ from collections import deque, defaultdict
4
+ import time
5
+
6
+ # Parameters
7
+ trace_len = 20
8
+ min_area = 500
9
+
10
+ # Memory
11
+ object_traces = defaultdict(lambda: deque(maxlen=trace_len))
12
+ long_term_memory = defaultdict(list)
13
+ next_object_id = 1
14
+ object_centroids = {}
15
+
16
+ def count_direction_changes(trace):
17
+ count = 0
18
+ for i in range(2, len(trace)):
19
+ v1 = np.array(trace[i - 1]) - np.array(trace[i - 2])
20
+ v2 = np.array(trace[i]) - np.array(trace[i - 1])
21
+ if np.dot(v1, v2) < 0:
22
+ count += 1
23
+ return count
24
+
25
+ def extract_features(trace):
26
+ if len(trace) < 2:
27
+ return [0, 0, 0, 0]
28
+ dx = trace[-1][0] - trace[0][0]
29
+ dy = trace[-1][1] - trace[0][1]
30
+ total_distance = sum(np.linalg.norm(np.array(trace[i]) - np.array(trace[i-1])) for i in range(1, len(trace)))
31
+ avg_speed = total_distance / (len(trace) + 1e-6)
32
+ direction_changes = count_direction_changes(trace)
33
+ return [dx, dy, avg_speed, direction_changes]
34
+
35
+ def ai_brain(trace, memory):
36
+ if len(trace) < 3:
37
+ return "Unknown"
38
+ dx, dy, speed, changes = extract_features(trace)
39
+
40
+ if len(memory) >= 5 and memory.count("Erratic") > 3:
41
+ return "Suspicious"
42
+ if speed > 150 and changes > 4:
43
+ return "Erratic"
44
+ if speed < 5 and changes == 0:
45
+ return "Idle"
46
+ return "Normal"
47
+
48
+ def get_color(i):
49
+ np.random.seed(i)
50
+ return tuple(int(x) for x in np.random.randint(100, 255, 3))
51
+
52
+ # Camera setup
53
+ cap = cv2.VideoCapture(0)
54
+ ret, prev = cap.read()
55
+ prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
56
+ prev_gray = cv2.GaussianBlur(prev_gray, (21, 21), 0)
57
+
58
+ while True:
59
+ ret, frame = cap.read()
60
+ if not ret:
61
+ break
62
+ gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
63
+ gray_blur = cv2.GaussianBlur(gray, (21, 21), 0)
64
+
65
+ # Compute the difference between consecutive frames
66
+ delta = cv2.absdiff(prev_gray, gray_blur)
67
+ thresh = cv2.threshold(delta, 25, 255, cv2.THRESH_BINARY)[1]
68
+ thresh = cv2.dilate(thresh, None, iterations=2)
69
+
70
+ contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
71
+ current_centroids = []
72
+
73
+ for cnt in contours:
74
+ if cv2.contourArea(cnt) < min_area:
75
+ continue
76
+ (x, y, w, h) = cv2.boundingRect(cnt)
77
+ cx, cy = x + w // 2, y + h // 2
78
+ current_centroids.append((cx, cy))
79
+ matched_id = None
80
+
81
+ # Match against a previously tracked object
82
+ for object_id, last_centroid in object_centroids.items():
83
+ if np.linalg.norm(np.array([cx, cy]) - np.array(last_centroid)) < 50:
84
+ matched_id = object_id
85
+ break
86
+
87
+ if matched_id is None:
88
+ matched_id = next_object_id
89
+ next_object_id += 1
90
+
91
+ object_centroids[matched_id] = (cx, cy)
92
+ object_traces[matched_id].append((cx, cy))
93
+ trace = object_traces[matched_id]
94
+
95
+ behavior = ai_brain(trace, [m['status'] for m in long_term_memory[matched_id]])
96
+ long_term_memory[matched_id].append({'status': behavior, 'timestamp': time.time()})
97
+
98
+ color = get_color(matched_id)
99
+ cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
100
+ cv2.putText(frame, f"ID {matched_id}", (x, y - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
101
+ cv2.putText(frame, f"Behavior: {behavior}", (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)
102
+
103
+ # Clean up inactive objects
104
+ inactive_ids = [obj_id for obj_id in object_centroids if object_centroids[obj_id] not in current_centroids]  # prune IDs whose stored centroid was not updated this frame
105
+ for iid in inactive_ids:
106
+ object_centroids.pop(iid, None)
107
+
108
+ prev_gray = gray_blur.copy()
109
+ cv2.imshow("Motion AI", frame)
110
+ if cv2.waitKey(1) & 0xFF == ord("q"):
111
+ break
112
+
113
+ cap.release()
114
+ cv2.destroyAllWindows()
opencv_test/untitled15.py ADDED
@@ -0,0 +1,147 @@
1
+ import cv2
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.optim as optim
5
+ import numpy as np
6
+
7
+ # ======== AI MODEL (PyTorch) ========
8
+ device = torch.device("cpu")
9
+
10
+ label_map = {"Idle": 0, "Normal": 1, "Erratic": 2}
11
+ reverse_label = {v: k for k, v in label_map.items()}
12
+
13
+ class BehaviorAI(nn.Module):
14
+ def __init__(self):
15
+ super().__init__()
16
+ self.model = nn.Sequential(
17
+ nn.Linear(4, 16),
18
+ nn.ReLU(),
19
+ nn.Linear(16, 8),
20
+ nn.ReLU(),
21
+ nn.Linear(8, 3)
22
+ )
23
+ self.loss_fn = nn.CrossEntropyLoss()
24
+ self.optimizer = optim.Adam(self.model.parameters(), lr=0.001)
25
+
26
+ def forward(self, x):
27
+ return self.model(x)
28
+
29
+ def predict_behavior(self, features):
30
+ self.model.eval()
31
+ with torch.no_grad():
32
+ x = torch.tensor([features], dtype=torch.float32).to(device)
33
+ logits = self.model(x)
34
+ pred = torch.argmax(logits, dim=-1).item()
35
+ return reverse_label[pred]
36
+
37
+ def learn_from(self, features, label):
38
+ self.model.train()
39
+ x = torch.tensor([features], dtype=torch.float32).to(device)
40
+ y = torch.tensor([label_map[label]], dtype=torch.long).to(device)
41
+ logits = self.model(x)
42
+ loss = self.loss_fn(logits, y)
43
+ self.optimizer.zero_grad()
44
+ loss.backward()
45
+ self.optimizer.step()
46
+
47
+ # ======== FEATURE EXTRACTION ========
48
+ def extract_features(trace):
49
+ if len(trace) < 2:
50
+ return [0, 0, 0, 0]
51
+
52
+ dx = trace[-1][0] - trace[0][0]
53
+ dy = trace[-1][1] - trace[0][1]
54
+ speeds = []
55
+ directions = []
56
+
57
+ for i in range(1, len(trace)):
58
+ x1, y1 = trace[i-1]
59
+ x2, y2 = trace[i]
60
+ dist = np.linalg.norm([x2 - x1, y2 - y1])
61
+ speeds.append(dist)
62
+ directions.append(np.arctan2(y2 - y1, x2 - x1))
63
+
64
+ avg_speed = np.mean(speeds)
65
+ direction_changes = np.sum(np.abs(np.diff(directions)))
66
+ return [dx, dy, avg_speed, direction_changes]
67
+
68
+ # ======== MAIN REAL-TIME TRACKING ========
69
+ cap = cv2.VideoCapture(0)  # or 'video.mp4' for a file
70
+
71
+ bg_subtractor = cv2.createBackgroundSubtractorMOG2()
72
+ traces = {}
73
+ next_id = 0
74
+ ai = BehaviorAI()
75
+
76
+ while True:
77
+ ret, frame = cap.read()
78
+ if not ret:
79
+ break
80
+
81
+ fgmask = bg_subtractor.apply(frame)
82
+ contours, _ = cv2.findContours(fgmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
83
+
84
+ current_positions = []
85
+
86
+ for cnt in contours:
87
+ if cv2.contourArea(cnt) < 500:
88
+ continue
89
+
90
+ x, y, w, h = cv2.boundingRect(cnt)
91
+ cx, cy = x + w // 2, y + h // 2
92
+ current_positions.append((cx, cy))
93
+ cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
94
+
95
+ new_traces = {}
96
+ matched_ids = set()
97
+
98
+ for cx, cy in current_positions:
99
+ min_dist = float('inf')
100
+ matched_id = None
101
+ for id, trace in traces.items():
102
+ if len(trace) == 0:
103
+ continue
104
+ prev_x, prev_y = trace[-1]
105
+ dist = np.linalg.norm([cx - prev_x, cy - prev_y])
106
+ if dist < 50 and dist < min_dist and id not in matched_ids:  # keep the nearest previous trace
107
+ min_dist = dist
108
+ matched_id = id
109
+
110
+ if matched_id is None:
111
+ matched_id = next_id
112
+ next_id += 1
113
+ new_traces[matched_id] = []
114
+
115
+ else:
116
+ new_traces[matched_id] = traces[matched_id]
117
+
118
+ new_traces[matched_id].append((cx, cy))
119
+ matched_ids.add(matched_id)
120
+
121
+ traces = new_traces
122
+
123
+ for id, trace in traces.items():
124
+ if len(trace) >= 2:
125
+ for i in range(1, len(trace)):
126
+ cv2.line(frame, trace[i-1], trace[i], (255, 0, 0), 2)
127
+
128
+ features = extract_features(trace)
129
+ behavior = ai.predict_behavior(features)
130
+
131
+ if len(trace) >= 10:
132
+ if features[2] < 2:
133
+ label = "Idle"
134
+ elif features[3] > 4:
135
+ label = "Erratic"
136
+ else:
137
+ label = "Normal"
138
+ ai.learn_from(features, label)
139
+
140
+ cv2.putText(frame, f"ID:{id} AI:{behavior}", trace[-1], cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)
141
+
142
+ cv2.imshow("Real-Time Tracker with AI", frame)
143
+ if cv2.waitKey(1) == 27: # ESC
144
+ break
145
+
146
+ cap.release()
147
+ cv2.destroyAllWindows()
opencv_test/untitled16.py ADDED
@@ -0,0 +1,86 @@
1
+ import cv2
2
+ import numpy as np
3
+ import torch
4
+
5
+ # AI-based Visual Memory
6
+ class ObjectMemory:
7
+ def __init__(self):
8
+ self.memory = {} # object_id: feature_vector
9
+ self.next_id = 1
10
+
11
+ def extract_features(self, crop):
12
+ try:
13
+ crop_resized = cv2.resize(crop, (32, 32)) # Resize to fixed size
14
+ crop_tensor = torch.tensor(crop_resized.transpose(2, 0, 1), dtype=torch.float32).unsqueeze(0) / 255.0
15
+ return crop_tensor.view(-1) # Flatten
16
+ except:
17
+ return None
18
+
19
+ def memorize(self, crop):
20
+ vec = self.extract_features(crop)
21
+ if vec is None:
22
+ return None
23
+ obj_id = self.next_id
24
+ self.memory[obj_id] = vec
25
+ self.next_id += 1
26
+ return obj_id
27
+
28
+ def find_match(self, crop, threshold=0.95):
29
+ vec = self.extract_features(crop)
30
+ if vec is None:
31
+ return None, 0.0
32
+
33
+ best_id = None
34
+ best_sim = 0.0
35
+ for obj_id, stored_vec in self.memory.items():
36
+ sim = torch.cosine_similarity(vec, stored_vec, dim=0).item()
37
+ if sim > best_sim and sim > threshold:
38
+ best_sim = sim
39
+ best_id = obj_id
40
+
41
+ return best_id, best_sim
42
+
43
+ # Video object tracker
44
+ def main():
45
+ cap = cv2.VideoCapture(0) # Use webcam; change to "video.mp4" for a file
46
+ fgbg = cv2.createBackgroundSubtractorMOG2()
47
+ memory = ObjectMemory()
48
+
49
+ while True:
50
+ ret, frame = cap.read()
51
+ if not ret:
52
+ break
53
+
54
+ fgmask = fgbg.apply(frame)
55
+ _, thresh = cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY)
56
+ contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
57
+
58
+ for cnt in contours:
59
+ if cv2.contourArea(cnt) < 800:
60
+ continue
61
+
62
+ x, y, w, h = cv2.boundingRect(cnt)
63
+ crop = frame[y:y+h, x:x+w]
64
+
65
+ match_id, sim = memory.find_match(crop)
66
+ if match_id is not None:
67
+ label = f"Seen before (ID {match_id})"
68
+ color = (0, 255, 0)
69
+ else:
70
+ new_id = memory.memorize(crop)
71
+ label = f"New Object (ID {new_id})"
72
+ color = (255, 0, 0)
73
+
74
+ cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
75
+ cv2.putText(frame, label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
76
+ 0.6, (255, 255, 255), 2)
77
+
78
+ cv2.imshow("Object Tracker with Memory", frame)
79
+ if cv2.waitKey(1) & 0xFF == 27: # ESC to quit
80
+ break
81
+
82
+ cap.release()
83
+ cv2.destroyAllWindows()
84
+
85
+ if __name__ == "__main__":
86
+ main()
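Note: cosine similarity over raw 32x32 pixel vectors, as in ObjectMemory above, is quite sensitive to lighting, scale and background changes. A cheap alternative sketch (not in the file; the function names are hypothetical) compares HSV color histograms instead:

import cv2

def hist_feature(crop):
    # Normalized hue-saturation histogram of a BGR crop.
    hsv = cv2.cvtColor(crop, cv2.COLOR_BGR2HSV)
    hist = cv2.calcHist([hsv], [0, 1], None, [30, 32], [0, 180, 0, 256])
    cv2.normalize(hist, hist, 0, 1, cv2.NORM_MINMAX)
    return hist

def hist_similarity(h1, h2):
    # Correlation in [-1, 1]; values near 1 mean similar color distributions.
    return cv2.compareHist(h1, h2, cv2.HISTCMP_CORREL)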
opencv_test/untitled17.py ADDED
@@ -0,0 +1,100 @@
1
+ import cv2
2
+ import numpy as np
3
+ import torch
4
+ import torchvision.transforms as transforms
5
+ from torchvision.models import resnet18
6
+ from torch.nn.functional import cosine_similarity
7
+
8
+ # Use GPU if available
9
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
10
+
11
+ # Feature extractor using pretrained ResNet18
12
+ class VisualFeatureExtractor:
13
+ def __init__(self):
14
+ model = resnet18(pretrained=True)
15
+ self.model = torch.nn.Sequential(*list(model.children())[:-1]).to(device).eval()
16
+ self.transform = transforms.Compose([
17
+ transforms.ToPILImage(),
18
+ transforms.Resize((224, 224)),
19
+ transforms.ToTensor()
20
+ ])
21
+
22
+ def extract(self, image):
23
+ try:
24
+ tensor = self.transform(image).unsqueeze(0).to(device)
25
+ with torch.no_grad():
26
+ features = self.model(tensor).squeeze()
27
+ return features / features.norm()
28
+ except:
29
+ return None
30
+
31
+ # Memory system for object identity
32
+ class ObjectMemory:
33
+ def __init__(self):
34
+ self.memory = {} # id: feature_vector
35
+ self.next_id = 1
36
+
37
+ def compare(self, feat, threshold=0.9):
38
+ best_id, best_sim = None, 0.0
39
+ for obj_id, stored_feat in self.memory.items():
40
+ sim = cosine_similarity(feat, stored_feat, dim=0).item()
41
+ if sim > best_sim and sim > threshold:
42
+ best_id, best_sim = obj_id, sim
43
+ return best_id, best_sim
44
+
45
+ def memorize(self, feat):
46
+ obj_id = self.next_id
47
+ self.memory[obj_id] = feat
48
+ self.next_id += 1
49
+ return obj_id
50
+
51
+ # Main application
52
+ def main():
53
+ cap = cv2.VideoCapture(0)
54
+ fgbg = cv2.createBackgroundSubtractorMOG2()
55
+ extractor = VisualFeatureExtractor()
56
+ memory = ObjectMemory()
57
+
58
+ while True:
59
+ ret, frame = cap.read()
60
+ if not ret:
61
+ break
62
+
63
+ fgmask = fgbg.apply(frame)
64
+ _, thresh = cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY)
65
+ contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
66
+
67
+ for cnt in contours:
68
+ if cv2.contourArea(cnt) < 1000:
69
+ continue
70
+
71
+ x, y, w, h = cv2.boundingRect(cnt)
72
+ crop = frame[y:y+h, x:x+w]
73
+ feat = extractor.extract(crop)
74
+
75
+ if feat is None:
76
+ continue
77
+
78
+ matched_id, similarity = memory.compare(feat)
79
+
80
+ if matched_id is not None:
81
+ label = f"Known ID {matched_id} ({similarity*100:.1f}%)"
82
+ color = (0, 255, 0)
83
+ else:
84
+ new_id = memory.memorize(feat)
85
+ label = f"New Object (ID {new_id})"
86
+ color = (0, 0, 255)
87
+
88
+ cv2.rectangle(frame, (x, y), (x+w, y+h), color, 2)
89
+ cv2.putText(frame, label, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX,
90
+ 0.6, (255, 255, 255), 2)
91
+
92
+ cv2.imshow("AI Object Memory", frame)
93
+ if cv2.waitKey(1) & 0xFF == 27: # ESC
94
+ break
95
+
96
+ cap.release()
97
+ cv2.destroyAllWindows()
98
+
99
+ if __name__ == "__main__":
100
+ main()
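Note: the transform in VisualFeatureExtractor resizes and converts to a tensor but does not normalize, and the crops coming from OpenCV are BGR while torchvision's pretrained backbones expect ImageNet-normalized RGB. A hedged variant of the preprocessing (an assumption, not the author's code):

import cv2
import torchvision.transforms as transforms

imagenet_transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

def preprocess(bgr_crop):
    # Convert BGR (OpenCV) to RGB before applying the ImageNet transform.
    rgb = cv2.cvtColor(bgr_crop, cv2.COLOR_BGR2RGB)
    return imagenet_transform(rgb).unsqueeze(0)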
opencv_test/untitled18.py ADDED
@@ -0,0 +1,99 @@
1
+ import cv2
2
+ import numpy as np
3
+ import torch
4
+ import torchvision.transforms as transforms
5
+ from torchvision.models import mobilenet_v2
6
+ from torch.nn.functional import cosine_similarity
7
+
8
+ # Use GPU if available
9
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
10
+
11
+ # Lightweight feature extractor using MobileNetV2
12
+ class FastFeatureExtractor:
13
+ def __init__(self):
14
+ model = mobilenet_v2(pretrained=True).features
15
+ self.model = torch.nn.Sequential(*list(model.children())[:-1]).to(device).eval()
16
+ self.transform = transforms.Compose([
17
+ transforms.ToPILImage(),
18
+ transforms.Resize((96, 96)),
19
+ transforms.ToTensor()
20
+ ])
21
+
22
+ def extract(self, image):
23
+ try:
24
+ tensor = self.transform(image).unsqueeze(0).to(device)
25
+ with torch.no_grad():
26
+ feat = self.model(tensor).mean([2, 3]).squeeze()
27
+ return feat / feat.norm()
28
+ except:
29
+ return None
30
+
31
+ # Simple memory with similarity threshold
32
+ class ObjectMemory:
33
+ def __init__(self, threshold=0.88):
34
+ self.memory = {}
35
+ self.next_id = 1
36
+ self.threshold = threshold
37
+
38
+ def match(self, feat):
39
+ best_id, best_sim = None, 0.0
40
+ for obj_id, ref_feat in self.memory.items():
41
+ sim = cosine_similarity(feat, ref_feat, dim=0).item()
42
+ if sim > best_sim and sim > self.threshold:
43
+ best_id, best_sim = obj_id, sim
44
+ return best_id, best_sim
45
+
46
+ def add(self, feat):
47
+ obj_id = self.next_id
48
+ self.memory[obj_id] = feat
49
+ self.next_id += 1
50
+ return obj_id
51
+
52
+ # Main app
53
+ def main():
54
+ cap = cv2.VideoCapture(0)
55
+ fgbg = cv2.createBackgroundSubtractorMOG2()
56
+ extractor = FastFeatureExtractor()
57
+ memory = ObjectMemory()
58
+
59
+ while True:
60
+ ret, frame = cap.read()
61
+ if not ret:
62
+ break
63
+
64
+ fg = fgbg.apply(frame)
65
+ _, thresh = cv2.threshold(fg, 200, 255, cv2.THRESH_BINARY)
66
+ contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
67
+
68
+ for cnt in contours:
69
+ if cv2.contourArea(cnt) < 1200:
70
+ continue
71
+
72
+ x, y, w, h = cv2.boundingRect(cnt)
73
+ roi = frame[y:y+h, x:x+w]
74
+ feat = extractor.extract(roi)
75
+
76
+ if feat is None:
77
+ continue
78
+
79
+ matched_id, similarity = memory.match(feat)
80
+ if matched_id:
81
+ label = f"Known #{matched_id} ({similarity*100:.1f}%)"
82
+ color = (0, 255, 0)
83
+ else:
84
+ new_id = memory.add(feat)
85
+ label = f"New Object #{new_id}"
86
+ color = (0, 0, 255)
87
+
88
+ cv2.rectangle(frame, (x, y), (x+w, y+h), color, 2)
89
+ cv2.putText(frame, label, (x, y-8), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
90
+
91
+ cv2.imshow("Fast Object Understanding", frame)
92
+ if cv2.waitKey(1) & 0xFF == 27: # ESC to exit
93
+ break
94
+
95
+ cap.release()
96
+ cv2.destroyAllWindows()
97
+
98
+ if __name__ == "__main__":
99
+ main()
opencv_test/untitled2.py ADDED
@@ -0,0 +1,75 @@
1
+ import cv2
2
+ import numpy as np
3
+
4
+ # Use the KNN background subtractor for motion detection
5
+ back_sub = cv2.createBackgroundSubtractorKNN(history=500, dist2Threshold=400, detectShadows=True)
6
+
7
+ # Helper to compute the centroid of a bounding box
8
+ def get_centroid(x, y, w, h):
9
+ return (int(x + w / 2), int(y + h / 2))
10
+
11
+ # Create a Kalman filter to track the object's motion
12
+ kalman = cv2.KalmanFilter(4, 2)
13
+ kalman.transitionMatrix = np.array([[1, 0, 1, 0],
14
+ [0, 1, 0, 1],
15
+ [0, 0, 1, 0],
16
+ [0, 0, 0, 1]], np.float32)
17
+ kalman.measurementMatrix = np.array([[1, 0, 0, 0],
18
+ [0, 1, 0, 0]], np.float32)
19
+ kalman.processNoiseCov = np.array([[1e-2, 0, 0, 0],
20
+ [0, 1e-2, 0, 0],
21
+ [0, 0, 1, 0],
22
+ [0, 0, 0, 1]], np.float32)
23
+ kalman.errorCovPost = np.eye(4, dtype=np.float32)
24
+ kalman.statePost = np.zeros((4, 1), np.float32)
25
+
26
+ # Capture video from the camera
27
+ cap = cv2.VideoCapture(0)
28
+
29
+ while True:
30
+ ret, frame = cap.read()
31
+ if not ret:
32
+ break
33
+
34
+ # Convert the frame to grayscale
35
+ gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
36
+
37
+ # Get the foreground mask of moving objects
38
+ fg_mask = back_sub.apply(frame)
39
+
40
+ # Apply morphological operations to remove shadows and noise
41
+ kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
42
+ fg_mask = cv2.morphologyEx(fg_mask, cv2.MORPH_OPEN, kernel, iterations=1)
43
+ fg_mask = cv2.dilate(fg_mask, kernel, iterations=1)
44
+
45
+ # Find contours
46
+ contours, _ = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
47
+
48
+ for cnt in contours:
49
+ area = cv2.contourArea(cnt)
50
+ if area > 100:  # only consider objects larger than this size
51
+ x, y, w, h = cv2.boundingRect(cnt)
52
+ centroid = get_centroid(x, y, w, h)
53
+ cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
54
+ cv2.circle(frame, centroid, 4, (0, 0, 255), -1)
55
+ cv2.putText(frame, "Moving Object", (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
56
+
57
+ # Feed the measured position to the Kalman filter and predict
58
+ kalman.correct(np.array([[np.float32(x + w / 2)], [np.float32(y + h / 2)]]))  # 2x1 float32 measurement
59
+ prediction = kalman.predict()
60
+
61
+ # Extract the predicted coordinates
62
+ predicted_x, predicted_y = int(prediction[0, 0]), int(prediction[1, 0])
63
+
64
+ # Draw the predicted position
65
+ cv2.circle(frame, (predicted_x, predicted_y), 4, (255, 0, 0), -1)
66
+
67
+ # Show the processed frame
68
+ cv2.imshow('Real-Time Object Tracking with Kalman Filter', frame)
69
+
70
+ # Exit the program with the ESC key
71
+ if cv2.waitKey(1) & 0xFF == 27:
72
+ break
73
+
74
+ cap.release()
75
+ cv2.destroyAllWindows()
opencv_test/untitled21.py ADDED
@@ -0,0 +1,99 @@
1
+ import cv2
2
+ import numpy as np
3
+ import torch
4
+ import torchvision.transforms as transforms
5
+ from torchvision.models import mobilenet_v3_small
6
+ from torch.nn.functional import cosine_similarity
7
+
8
+ # Use GPU if available
9
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
10
+
11
+ # Lightweight feature extractor using MobileNetV3
12
+ class FastFeatureExtractor:
13
+ def __init__(self):
14
+ model = mobilenet_v3_small(pretrained=True).features
15
+ self.model = torch.nn.Sequential(*list(model.children())[:-1]).to(device).eval()
16
+ self.transform = transforms.Compose([
17
+ transforms.ToPILImage(),
18
+ transforms.Resize((96, 96)),
19
+ transforms.ToTensor()
20
+ ])
21
+
22
+ def extract(self, image):
23
+ try:
24
+ tensor = self.transform(image).unsqueeze(0).to(device)
25
+ with torch.no_grad():
26
+ feat = self.model(tensor).mean([2, 3]).squeeze()
27
+ return feat / feat.norm()
28
+ except:
29
+ return None
30
+
31
+ # Simple memory with similarity threshold
32
+ class ObjectMemory:
33
+ def __init__(self, threshold=0.88):
34
+ self.memory = {}
35
+ self.next_id = 1
36
+ self.threshold = threshold
37
+
38
+ def match(self, feat):
39
+ best_id, best_sim = None, 0.0
40
+ for obj_id, ref_feat in self.memory.items():
41
+ sim = cosine_similarity(feat, ref_feat, dim=0).item()
42
+ if sim > best_sim and sim > self.threshold:
43
+ best_id, best_sim = obj_id, sim
44
+ return best_id, best_sim
45
+
46
+ def add(self, feat):
47
+ obj_id = self.next_id
48
+ self.memory[obj_id] = feat
49
+ self.next_id += 1
50
+ return obj_id
51
+
52
+ # Main app
53
+ def main():
54
+ cap = cv2.VideoCapture(0)
55
+ fgbg = cv2.createBackgroundSubtractorMOG2()
56
+ extractor = FastFeatureExtractor()
57
+ memory = ObjectMemory()
58
+
59
+ while True:
60
+ ret, frame = cap.read()
61
+ if not ret:
62
+ break
63
+
64
+ fg = fgbg.apply(frame)
65
+ _, thresh = cv2.threshold(fg, 200, 255, cv2.THRESH_BINARY)
66
+ contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
67
+
68
+ for cnt in contours:
69
+ if cv2.contourArea(cnt) < 1200:
70
+ continue
71
+
72
+ x, y, w, h = cv2.boundingRect(cnt)
73
+ roi = frame[y:y+h, x:x+w]
74
+ feat = extractor.extract(roi)
75
+
76
+ if feat is None:
77
+ continue
78
+
79
+ matched_id, similarity = memory.match(feat)
80
+ if matched_id:
81
+ label = f"Known #{matched_id} ({similarity*100:.1f}%)"
82
+ color = (0, 255, 0)
83
+ else:
84
+ new_id = memory.add(feat)
85
+ label = f"New Object #{new_id}"
86
+ color = (0, 0, 255)
87
+
88
+ cv2.rectangle(frame, (x, y), (x+w, y+h), color, 2)
89
+ cv2.putText(frame, label, (x, y-8), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
90
+
91
+ cv2.imshow("Fast Object Understanding", frame)
92
+ if cv2.waitKey(1) & 0xFF == 27: # ESC to exit
93
+ break
94
+
95
+ cap.release()
96
+ cv2.destroyAllWindows()
97
+
98
+ if __name__ == "__main__":
99
+ main()
opencv_test/untitled22.py ADDED
@@ -0,0 +1,5 @@
1
+ from ultralytics import YOLO
2
+
3
+ # Load a pretrained YOLO model
4
+ model = YOLO("yolo11x.pt")  # other variants: yolov8n.pt, yolov8s.pt, yolov8m.pt
5
+
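Note: this script only loads the weights. A minimal usage sketch (assumes the weights file is available locally or can be downloaded by ultralytics; "bus.jpg" is just a placeholder image path):

from ultralytics import YOLO

model = YOLO("yolo11n.pt")
results = model("bus.jpg")  # accepts a path, URL, numpy array, or PIL image
for r in results:
    for box in r.boxes:
        cls_id = int(box.cls[0])               # class index
        conf = float(box.conf[0])              # confidence score
        x1, y1, x2, y2 = box.xyxy[0].tolist()  # box corners
        print(model.names[cls_id], f"{conf:.2f}", (x1, y1, x2, y2))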
opencv_test/untitled23.py ADDED
@@ -0,0 +1,31 @@
1
+ import cv2
2
+ from ultralytics import YOLO
3
+
4
+ # Load a pretrained YOLO model
5
+ model = YOLO("yolo11n.pt")  # other variants: yolov8n.pt, yolov8s.pt, yolov8m.pt
6
+
7
+ # Use the camera for real-time object detection
8
+ cap = cv2.VideoCapture(0)  # 0 selects the system's default camera
9
+
10
+ while True:
11
+ ret, frame = cap.read()
12
+
13
+ if not ret:
14
+ break
15
+
16
+ # Run detection on each video frame
17
+ results = model(frame)
18
+
19
+ # The model returns a list of result objects
20
+ result = results[0]  # take the first result
21
+
22
+ # Display the frame with the predictions
23
+ result.show()  # show the image with the detected boxes
24
+
25
+ # Quit when the "q" key is pressed
26
+ if cv2.waitKey(1) & 0xFF == ord('q'):
27
+ break
28
+
29
+ # Release resources
30
+ cap.release()
31
+ cv2.destroyAllWindows()
opencv_test/untitled24.py ADDED
@@ -0,0 +1,32 @@
1
+ import cv2
2
+ from ultralytics import YOLO
3
+
4
+ # Load a pretrained YOLO model
5
+ model = YOLO("yolo12l.pt")  # other variants: yolov8n.pt, yolov8s.pt, yolov8m.pt
6
+
7
+ # Use the camera for real-time object detection
8
+ cap = cv2.VideoCapture(0)  # 0 selects the system's default camera
9
+
10
+ while True:
11
+ ret, frame = cap.read()
12
+
13
+ if not ret:
14
+ break
15
+
16
+ # Run detection on each video frame
17
+ results = model(frame)
18
+
19
+ # The model returns a list of result objects
20
+ result = results[0]  # take the first result
21
+
22
+ # Display the frame with the predictions
23
+ frame_with_boxes = result.plot()  # frame annotated with the detected boxes
24
+ cv2.imshow('Real-Time Object Detection', frame_with_boxes)  # show the annotated frame
25
+
26
+ # Quit when the "q" key is pressed
27
+ if cv2.waitKey(1) & 0xFF == ord('q'):
28
+ break
29
+
30
+ # Release resources
31
+ cap.release()
32
+ cv2.destroyAllWindows()
opencv_test/untitled3.py ADDED
@@ -0,0 +1,80 @@
1
+ import cv2
2
+ import numpy as np
3
+ from sklearn.neighbors import KNeighborsClassifier
4
+
5
+ # Use the KNN background subtractor for motion detection
6
+ back_sub = cv2.createBackgroundSubtractorKNN(history=500, dist2Threshold=400, detectShadows=True)
7
+
8
+ # Helper to compute the centroid of a bounding box
9
+ def get_centroid(x, y, w, h):
10
+ return (int(x + w / 2), int(y + h / 2))
11
+
12
+ # Capture video from the camera
13
+ cap = cv2.VideoCapture(0)
14
+
15
+ # KNN model
16
+ knn = KNeighborsClassifier(n_neighbors=3)
17
+
18
+ # Training data and labels
19
+ object_features = []
20
+ object_labels = []
21
+
22
+ # Periodic training settings
23
+ learning_interval = 30  # retrain once every 30 frames
24
+ frame_count = 0
25
+
26
+ while True:
27
+ ret, frame = cap.read()
28
+ if not ret:
29
+ break
30
+
31
+ # Convert the frame to grayscale
32
+ gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
33
+
34
+ # Get the foreground mask of moving objects
35
+ fg_mask = back_sub.apply(frame)
36
+
37
+ # Apply morphological operations to remove shadows and noise
38
+ kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
39
+ fg_mask = cv2.morphologyEx(fg_mask, cv2.MORPH_OPEN, kernel, iterations=1)
40
+ fg_mask = cv2.dilate(fg_mask, kernel, iterations=1)
41
+
42
+ # Find contours
43
+ contours, _ = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
44
+
45
+ for cnt in contours:
46
+ area = cv2.contourArea(cnt)
47
+ if area > 100:  # only consider objects larger than this size
48
+ x, y, w, h = cv2.boundingRect(cnt)
49
+ centroid = get_centroid(x, y, w, h)
50
+
51
+ # Extract features
52
+ features = [w, h, centroid[0], centroid[1]]
53
+ object_features.append(features)
54
+ object_labels.append(1)  # assume every moving object belongs to class 1
55
+
56
+ # Draw the bounding box and centroid
57
+ cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
58
+ cv2.circle(frame, centroid, 4, (0, 0, 255), -1)
59
+
60
+ # Train the model periodically
61
+ frame_count += 1
62
+ if frame_count % learning_interval == 0 and len(object_features) > 5:
63
+ # Train only once enough data has been collected
64
+ knn.fit(object_features, object_labels)
65
+ print("Model updated!")
66
+
67
+ # Predict with the KNN model for new objects
68
+ if len(object_features) > 5 and frame_count % learning_interval == 0:
69
+ # Make sure the model has been trained before predicting
70
+ predicted_label = knn.predict([features])[0]
71
+ cv2.putText(frame, f"Predicted: {predicted_label}", (x, y - 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
72
+
73
+ cv2.imshow('Optimized Object Tracking', frame)
74
+
75
+ # Exit the program with the ESC key
76
+ if cv2.waitKey(1) & 0xFF == 27:
77
+ break
78
+
79
+ cap.release()
80
+ cv2.destroyAllWindows()
opencv_test/untitled5.py ADDED
@@ -0,0 +1,86 @@
1
+ import cv2
2
+ import numpy as np
3
+ from sklearn.neighbors import KNeighborsClassifier
4
+
5
+ # Use the KNN background subtractor for motion detection
6
+ back_sub = cv2.createBackgroundSubtractorKNN(history=500, dist2Threshold=400, detectShadows=True)
7
+
8
+ # Helper to compute the centroid of a bounding box
9
+ def get_centroid(x, y, w, h):
10
+ return (int(x + w / 2), int(y + h / 2))
11
+
12
+ # Capture video from the camera
13
+ cap = cv2.VideoCapture(0)
14
+
15
+ # KNN model
16
+ knn = KNeighborsClassifier(n_neighbors=3)
17
+
18
+ # Training data and labels
19
+ object_features = []
20
+ object_labels = []
21
+
22
+ # Periodic training settings
23
+ learning_interval = 30  # retrain once every 30 frames
24
+ frame_count = 0
25
+
26
+ # Filter to reduce noise
27
+ def apply_noise_reduction(fg_mask):
28
+ kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
29
+ fg_mask = cv2.morphologyEx(fg_mask, cv2.MORPH_OPEN, kernel, iterations=2)  # remove noise with an opening operation
30
+ fg_mask = cv2.dilate(fg_mask, kernel, iterations=1)  # dilate to strengthen the detected objects
31
+ return fg_mask
32
+
33
+ while True:
34
+ ret, frame = cap.read()
35
+ if not ret:
36
+ break
37
+
38
+ # Convert the frame to grayscale
39
+ gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
40
+
41
+ # Get the foreground mask of moving objects
42
+ fg_mask = back_sub.apply(frame)
43
+
44
+ # Apply the noise-reduction filter
45
+ fg_mask = apply_noise_reduction(fg_mask)
46
+
47
+ # Find contours
48
+ contours, _ = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
49
+
50
+ for cnt in contours:
51
+ area = cv2.contourArea(cnt)
52
+ if area > 100:  # only consider objects larger than this size
53
+ x, y, w, h = cv2.boundingRect(cnt)
54
+ centroid = get_centroid(x, y, w, h)
55
+
56
+ # Extract features
57
+ features = [w, h, centroid[0], centroid[1], area]  # area added as an extra feature
58
+ object_features.append(features)
59
+ object_labels.append(1)  # assume every moving object belongs to class 1
60
+
61
+ # Draw the bounding box and centroid
62
+ cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
63
+ cv2.circle(frame, centroid, 4, (0, 0, 255), -1)
64
+
65
+ # Train the model periodically
66
+ frame_count += 1
67
+ if frame_count % learning_interval == 0 and len(object_features) > 5:
68
+ # Train only once enough data has been collected
69
+ knn.fit(object_features, object_labels)
70
+ print("Model updated!")
71
+
72
+ # Predict with the KNN model for new objects
73
+ if len(object_features) > 5 and frame_count % learning_interval == 0:
74
+ # Make sure the model has been trained before predicting
75
+ predicted_label = knn.predict([features])[0]
76
+ cv2.putText(frame, f"Predicted: {predicted_label}", (x, y - 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
77
+
78
+ # Show the frame
79
+ cv2.imshow('Optimized Object Tracking', frame)
80
+
81
+ # Exit the program with the ESC key
82
+ if cv2.waitKey(1) & 0xFF == 27:
83
+ break
84
+
85
+ cap.release()
86
+ cv2.destroyAllWindows()
opencv_test/untitled6.py ADDED
@@ -0,0 +1,100 @@
1
+ import cv2
2
+ import numpy as np
3
+ from sklearn.neighbors import KNeighborsClassifier
4
+
5
+ # Use the KNN background subtractor for motion detection
6
+ back_sub = cv2.createBackgroundSubtractorKNN(history=500, dist2Threshold=400, detectShadows=True)
7
+
8
+ # Helper to compute the centroid of a bounding box
9
+ def get_centroid(x, y, w, h):
10
+ return (int(x + w / 2), int(y + h / 2))
11
+
12
+ # Helper to reduce noise in the foreground mask
13
+ def apply_noise_reduction(fg_mask):
14
+ kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
15
+ fg_mask = cv2.morphologyEx(fg_mask, cv2.MORPH_OPEN, kernel, iterations=2)
16
+ fg_mask = cv2.dilate(fg_mask, kernel, iterations=1)
17
+ return fg_mask
18
+
19
+ # Helper to determine the direction of movement
20
+ def detect_direction(prev, curr):
21
+ dx = curr[0] - prev[0]
22
+ dy = curr[1] - prev[1]
23
+ if abs(dx) > abs(dy):
24
+ return "Right" if dx > 0 else "Left"
25
+ else:
26
+ return "Down" if dy > 0 else "Up"
27
+
28
+ # Capture video from the camera
29
+ cap = cv2.VideoCapture(0)
30
+
31
+ # KNN model
32
+ knn = KNeighborsClassifier(n_neighbors=3)
33
+
34
+ # Training data and labels
35
+ object_features = []
36
+ object_labels = []
37
+
38
+ # Periodic training settings
39
+ learning_interval = 30
40
+ frame_count = 0
41
+
42
+ # Keep the last centroid positions so each path can be followed
43
+ prev_centroids = []
44
+
45
+ while True:
46
+ ret, frame = cap.read()
47
+ if not ret:
48
+ break
49
+
50
+ gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
51
+ fg_mask = back_sub.apply(frame)
52
+ fg_mask = apply_noise_reduction(fg_mask)
53
+ contours, _ = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
54
+
55
+ current_centroids = []
56
+
57
+ for cnt in contours:
58
+ area = cv2.contourArea(cnt)
59
+ if area > 100:
60
+ x, y, w, h = cv2.boundingRect(cnt)
61
+ centroid = get_centroid(x, y, w, h)
62
+ current_centroids.append(centroid)
63
+
64
+ # Extract features
65
+ features = [w, h, centroid[0], centroid[1], area]
66
+ object_features.append(features)
67
+ object_labels.append(1)
68
+
69
+ # Draw the bounding box and centroid
70
+ cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
71
+ cv2.circle(frame, centroid, 4, (0, 0, 255), -1)
72
+
73
+ # If a previous centroid exists, determine the direction of movement
74
+ if len(prev_centroids) > 0:
75
+ closest_prev = min(prev_centroids, key=lambda p: np.linalg.norm(np.array(p) - np.array(centroid)))
76
+ direction = detect_direction(closest_prev, centroid)
77
+ cv2.putText(frame, f"Dir: {direction}", (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 0), 2)
78
+
79
+ # Update the previous centroids
80
+ prev_centroids = current_centroids.copy()
81
+
82
+ # Periodically retrain the model
83
+ frame_count += 1
84
+ if frame_count % learning_interval == 0 and len(object_features) > 5:
85
+ knn.fit(object_features, object_labels)
86
+ print("Model updated!")
87
+
88
+ if len(object_features) > 5 and frame_count % learning_interval == 0:
89
+ predicted_label = knn.predict([features])[0]
90
+ cv2.putText(frame, f"Predicted: {predicted_label}", (x, y - 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
91
+
92
+ # Show the frame
93
+ cv2.imshow('Object Tracking with Direction', frame)
94
+
95
+ # Exit with the ESC key
96
+ if cv2.waitKey(1) & 0xFF == 27:
97
+ break
98
+
99
+ cap.release()
100
+ cv2.destroyAllWindows()
opencv_test/untitled7.py ADDED
@@ -0,0 +1,138 @@
1
+ import cv2
2
+ import numpy as np
3
+ import time
4
+ from sklearn.neighbors import KNeighborsClassifier
5
+ from collections import defaultdict, deque
6
+
7
+ back_sub = cv2.createBackgroundSubtractorKNN(history=500, dist2Threshold=400, detectShadows=True)
8
+ cap = cv2.VideoCapture(0)
9
+
10
+ # Store object traces
11
+ object_traces = defaultdict(lambda: deque(maxlen=30))  # last 30 points of each object
12
+ object_last_seen = {}
13
+ object_id_counter = 0
14
+
15
+ # For real-time learning
16
+ knn = KNeighborsClassifier(n_neighbors=3)
17
+ features_set = []
18
+ labels_set = []
19
+ frame_count = 0
20
+ learning_interval = 30
21
+
22
+ def apply_noise_reduction(mask):
23
+ kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
24
+ mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=2)
25
+ mask = cv2.dilate(mask, kernel, iterations=1)
26
+ return mask
27
+
28
+ def get_centroid(x, y, w, h):
29
+ return (int(x + w / 2), int(y + h / 2))
30
+
31
+ def calculate_direction(trace):
32
+ if len(trace) < 2:
33
+ return "-"
34
+ dx = trace[-1][0] - trace[0][0]
35
+ dy = trace[-1][1] - trace[0][1]
36
+ if abs(dx) > abs(dy):
37
+ return "چپ" if dx < 0 else "راست"
38
+ else:
39
+ return "بالا" if dy < 0 else "پایین"
40
+
41
+ def calculate_speed(trace, duration):
42
+ if len(trace) < 2 or duration == 0:
43
+ return 0
44
+ dist = np.linalg.norm(np.array(trace[-1]) - np.array(trace[0]))
45
+ return dist / duration
46
+
47
+ def count_direction_changes(trace):
48
+ changes = 0
49
+ for i in range(2, len(trace)):
50
+ dx1 = trace[i-1][0] - trace[i-2][0]
51
+ dx2 = trace[i][0] - trace[i-1][0]
52
+ if dx1 * dx2 < 0: # تغییر جهت افقی
53
+ changes += 1
54
+ return changes
55
+
56
+ while True:
57
+ ret, frame = cap.read()
58
+ if not ret:
59
+ break
60
+
61
+ gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
62
+ fg_mask = back_sub.apply(frame)
63
+ fg_mask = apply_noise_reduction(fg_mask)
64
+
65
+ contours, _ = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
66
+
67
+ current_ids = []
68
+ for cnt in contours:
69
+ area = cv2.contourArea(cnt)
70
+ if area < 150:
71
+ continue
72
+
73
+ x, y, w, h = cv2.boundingRect(cnt)
74
+ centroid = get_centroid(x, y, w, h)
75
+
76
+ # شناسایی یا ایجاد شناسه جدید
77
+ matched_id = None
78
+ for oid, trace in object_traces.items():
79
+ if np.linalg.norm(np.array(trace[-1]) - np.array(centroid)) < 50:
80
+ matched_id = oid
81
+ break
82
+
83
+ if matched_id is None:
84
+ matched_id = object_id_counter
85
+ object_id_counter += 1
86
+
87
+ object_traces[matched_id].append(centroid)
88
+ object_last_seen[matched_id] = time.time()
89
+ current_ids.append(matched_id)
90
+
91
+ trace = object_traces[matched_id]
92
+ duration = time.time() - object_last_seen[matched_id] + 0.001
93
+ speed = calculate_speed(trace, duration)
94
+ direction = calculate_direction(trace)
95
+ direction_changes = count_direction_changes(trace)
96
+ total_move = sum(np.linalg.norm(np.array(trace[i]) - np.array(trace[i-1])) for i in range(1, len(trace)))
97
+
98
+ # ویژگی برای مدل
99
+ feature = [w, h, centroid[0], centroid[1], area, speed, direction_changes]
100
+ label = 1 # کلاس پیش‌فرض: عادی
101
+
102
+ # برچسب‌گذاری خودکار ساده:
103
+ if speed > 100 or direction_changes > 4:
104
+ label = 2 # مشکوک
105
+
106
+ features_set.append(feature)
107
+ labels_set.append(label)
108
+
109
+ if len(features_set) > 10 and frame_count % learning_interval == 0:
110
+ knn.fit(features_set, labels_set)
111
+ print("مدل به‌روزرسانی شد.")
112
+
113
+ predicted = "-"
114
+ if len(features_set) > 10:
115
+ predicted = knn.predict([feature])[0]
116
+
117
+ # رسم اطلاعات روی فریم
118
+ cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0) if label == 1 else (0, 0, 255), 2)
119
+ cv2.circle(frame, centroid, 4, (255, 255, 255), -1)
120
+ cv2.putText(frame, f"ID: {matched_id} | جهت: {direction} | سرعت: {int(speed)}", (x, y - 25),
121
+ cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 1)
122
+ cv2.putText(frame, f"رفتار: {'عادی' if predicted == 1 else 'مشکوک'}", (x, y - 5),
123
+ cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)
124
+
125
+ frame_count += 1
126
+
127
+ # حذف آی‌دی‌های قدیمی
128
+ for oid in list(object_last_seen):
129
+ if time.time() - object_last_seen[oid] > 2:
130
+ object_traces.pop(oid, None)
131
+ object_last_seen.pop(oid, None)
132
+
133
+ cv2.imshow("هوش رفتاری", frame)
134
+ if cv2.waitKey(1) & 0xFF == 27:
135
+ break
136
+
137
+ cap.release()
138
+ cv2.destroyAllWindows()
opencv_test/untitled8.py ADDED
@@ -0,0 +1,140 @@
+ import cv2
+ import numpy as np
+ import time
+ from sklearn.neighbors import KNeighborsClassifier
+ from collections import defaultdict, deque
+
+ back_sub = cv2.createBackgroundSubtractorKNN(history=500, dist2Threshold=400, detectShadows=True)
+ cap = cv2.VideoCapture(0)
+
+ # Store the trace of each object
+ object_traces = defaultdict(lambda: deque(maxlen=30))  # last 30 points of each object
+ object_last_seen = {}
+ object_id_counter = 0
+
+ # For real-time learning
+ knn = KNeighborsClassifier(n_neighbors=3)
+ features_set = []
+ labels_set = []
+ frame_count = 0
+ learning_interval = 30
+
+ def apply_noise_reduction(mask):
+     kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
+     mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=2)
+     mask = cv2.dilate(mask, kernel, iterations=1)
+     return mask
+
+ def get_centroid(x, y, w, h):
+     return (int(x + w / 2), int(y + h / 2))
+
+ def calculate_direction(trace):
+     if len(trace) < 2:
+         return "-"
+     dx = trace[-1][0] - trace[0][0]
+     dy = trace[-1][1] - trace[0][1]
+     if abs(dx) > abs(dy):
+         return "Left" if dx < 0 else "Right"
+     else:
+         return "Up" if dy < 0 else "Down"
+
+ def calculate_speed(trace, duration):
+     if len(trace) < 2 or duration == 0:
+         return 0
+     dist = np.linalg.norm(np.array(trace[-1]) - np.array(trace[0]))
+     return dist / duration
+
+ def count_direction_changes(trace):
+     changes = 0
+     for i in range(2, len(trace)):
+         dx1 = trace[i-1][0] - trace[i-2][0]
+         dx2 = trace[i][0] - trace[i-1][0]
+         if dx1 * dx2 < 0:  # horizontal direction change
+             changes += 1
+     return changes
+
+ while True:
+     ret, frame = cap.read()
+     if not ret:
+         break
+
+     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+     fg_mask = back_sub.apply(frame)
+     fg_mask = apply_noise_reduction(fg_mask)
+
+     contours, _ = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+
+     current_ids = []
+     predicted = 1  # default prediction value (used when no prediction is made)
+     for cnt in contours:
+         area = cv2.contourArea(cnt)
+         if area < 150:
+             continue
+
+         x, y, w, h = cv2.boundingRect(cnt)
+         centroid = get_centroid(x, y, w, h)
+
+         # Match an existing ID or create a new one
+         matched_id = None
+         for oid, trace in object_traces.items():
+             if np.linalg.norm(np.array(trace[-1]) - np.array(centroid)) < 50:
+                 matched_id = oid
+                 break
+
+         if matched_id is None:
+             matched_id = object_id_counter
+             object_id_counter += 1
+
+         object_traces[matched_id].append(centroid)
+         object_last_seen[matched_id] = time.time()
+         current_ids.append(matched_id)
+
+         trace = object_traces[matched_id]
+         duration = time.time() - object_last_seen[matched_id] + 0.001
+         speed = calculate_speed(trace, duration)
+         direction = calculate_direction(trace)
+         direction_changes = count_direction_changes(trace)
+         total_move = sum(np.linalg.norm(np.array(trace[i]) - np.array(trace[i-1])) for i in range(1, len(trace)))
+
+         # Feature vector for the model
+         feature = [w, h, centroid[0], centroid[1], area, speed, direction_changes]
+         label = 1  # default class: normal
+
+         # Simple automatic labelling:
+         if speed > 100 or direction_changes > 4:
+             label = 2  # suspicious
+
+         features_set.append(feature)
+         labels_set.append(label)
+
+         # Train the model only when enough data is available
+         if len(features_set) > 10 and frame_count % learning_interval == 0:
+             knn.fit(features_set, labels_set)
+             print("Model updated.")
+
+         # Predict only after training
+         if len(features_set) > 10 and hasattr(knn, "classes_"):
+             predicted = knn.predict([feature])[0]
+
+         # Draw the information on the frame
+         cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0) if label == 1 else (0, 0, 255), 2)
+         cv2.circle(frame, centroid, 4, (255, 255, 255), -1)
+         cv2.putText(frame, f"ID: {matched_id} | Dir: {direction} | Speed: {int(speed)}", (x, y - 25),
+                     cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 1)
+         cv2.putText(frame, f"Behavior: {'Normal' if predicted == 1 else 'Suspicious'}", (x, y - 5),
+                     cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)
+
+     frame_count += 1
+
+     # Remove stale IDs
+     for oid in list(object_last_seen):
+         if time.time() - object_last_seen[oid] > 2:
+             object_traces.pop(oid, None)
+             object_last_seen.pop(oid, None)
+
+     cv2.imshow("Behavior Intelligence", frame)
+     if cv2.waitKey(1) & 0xFF == 27:
+         break
+
+ cap.release()
+ cv2.destroyAllWindows()
opencv_test/untitled9.py ADDED
@@ -0,0 +1,153 @@
+ import cv2
+ import numpy as np
+ import time
+ from sklearn.neighbors import KNeighborsClassifier
+ from collections import defaultdict, deque
+
+ # Background subtractor for motion detection
+ back_sub = cv2.createBackgroundSubtractorKNN(history=500, dist2Threshold=400, detectShadows=True)
+ cap = cv2.VideoCapture(0)
+
+ # Store the trace of each object
+ object_traces = defaultdict(lambda: deque(maxlen=30))  # last 30 points of each object
+ object_last_seen = {}
+ object_id_counter = 0
+
+ # For real-time learning
+ knn = KNeighborsClassifier(n_neighbors=3)
+ features_set = []
+ labels_set = []
+ frame_count = 0
+ learning_interval = 30
+
+ # Start time for data collection
+ start_time = time.time()
+ learning_time_limit = 60  # 1 minute to collect data
+
+ # Flag to prevent prediction before the model is trained
+ is_trained = False
+
+ def apply_noise_reduction(mask):
+     kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
+     mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=2)
+     mask = cv2.dilate(mask, kernel, iterations=1)
+     return mask
+
+ def get_centroid(x, y, w, h):
+     return (int(x + w / 2), int(y + h / 2))
+
+ def calculate_direction(trace):
+     if len(trace) < 2:
+         return "-"
+     dx = trace[-1][0] - trace[0][0]
+     dy = trace[-1][1] - trace[0][1]
+     if abs(dx) > abs(dy):
+         return "Left" if dx < 0 else "Right"
+     else:
+         return "Up" if dy < 0 else "Down"
+
+ def calculate_speed(trace, duration):
+     if len(trace) < 2 or duration == 0:
+         return 0
+     dist = np.linalg.norm(np.array(trace[-1]) - np.array(trace[0]))
+     return dist / duration
+
+ def count_direction_changes(trace):
+     changes = 0
+     for i in range(2, len(trace)):
+         dx1 = trace[i-1][0] - trace[i-2][0]
+         dx2 = trace[i][0] - trace[i-1][0]
+         if dx1 * dx2 < 0:  # horizontal direction change
+             changes += 1
+     return changes
+
+ while True:
+     ret, frame = cap.read()
+     if not ret:
+         break
+
+     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+     fg_mask = back_sub.apply(frame)
+     fg_mask = apply_noise_reduction(fg_mask)
+
+     contours, _ = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+
+     current_ids = []
+     predicted = 1  # default prediction value (used when no prediction is made)
+     for cnt in contours:
+         area = cv2.contourArea(cnt)
+         if area < 150:
+             continue
+
+         x, y, w, h = cv2.boundingRect(cnt)
+         centroid = get_centroid(x, y, w, h)
+
+         # Match an existing ID or create a new one
+         matched_id = None
+         for oid, trace in object_traces.items():
+             if np.linalg.norm(np.array(trace[-1]) - np.array(centroid)) < 50:
+                 matched_id = oid
+                 break
+
+         if matched_id is None:
+             matched_id = object_id_counter
+             object_id_counter += 1
+
+         object_traces[matched_id].append(centroid)
+         object_last_seen[matched_id] = time.time()
+         current_ids.append(matched_id)
+
+         trace = object_traces[matched_id]
+         duration = time.time() - object_last_seen[matched_id] + 0.001
+         speed = calculate_speed(trace, duration)
+         direction = calculate_direction(trace)
+         direction_changes = count_direction_changes(trace)
+         total_move = sum(np.linalg.norm(np.array(trace[i]) - np.array(trace[i-1])) for i in range(1, len(trace)))
+
+         # Feature vector for the model
+         feature = [w, h, centroid[0], centroid[1], area, speed, direction_changes]
+         label = 1  # default class: normal
+
+         # Simple automatic labelling:
+         if speed > 100 or direction_changes > 4:
+             label = 2  # suspicious
+
+         features_set.append(feature)
+         labels_set.append(label)
+
+         # Train the model only once enough data has been collected
+         if time.time() - start_time < learning_time_limit:
+             # Still in the data-collection phase
+             continue
+         elif not is_trained:  # the model has not been trained yet
+             if len(features_set) > 10:
+                 knn.fit(features_set, labels_set)  # train the model
+                 is_trained = True  # the model has now been trained
+                 print("Model updated.")
+
+         # Predict only after training
+         if is_trained:
+             predicted = knn.predict([feature])[0]
+
+         # Draw the information on the frame
+         cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0) if label == 1 else (0, 0, 255), 2)
+         cv2.circle(frame, centroid, 4, (255, 255, 255), -1)
+         cv2.putText(frame, f"ID: {matched_id} | Dir: {direction} | Speed: {int(speed)}", (x, y - 25),
+                     cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 1)
+         cv2.putText(frame, f"Behavior: {'Normal' if predicted == 1 else 'Suspicious'}", (x, y - 5),
+                     cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)
+
+     frame_count += 1
+
+     # Remove stale IDs
+     for oid in list(object_last_seen):
+         if time.time() - object_last_seen[oid] > 2:
+             object_traces.pop(oid, None)
+             object_last_seen.pop(oid, None)
+
+     cv2.imshow("Behavior Intelligence", frame)
+     if cv2.waitKey(1) & 0xFF == 27:
+         break
+
+ cap.release()
+ cv2.destroyAllWindows()