Add files using upload-large-folder tool
- .gitattributes +58 -0
- 0dE2T4oBgHgl3EQfiQdl/content/tmp_files/2301.03956v1.pdf.txt +895 -0
- 0dE2T4oBgHgl3EQfiQdl/content/tmp_files/load_file.txt +0 -0
- 1tE0T4oBgHgl3EQfuQHv/vector_store/index.faiss +3 -0
- 1tE0T4oBgHgl3EQfuQHv/vector_store/index.pkl +3 -0
- 29E1T4oBgHgl3EQflwQn/content/2301.03288v1.pdf +3 -0
- 29E1T4oBgHgl3EQflwQn/vector_store/index.pkl +3 -0
- 2NAzT4oBgHgl3EQfe_yo/content/tmp_files/2301.01446v1.pdf.txt +691 -0
- 2NAzT4oBgHgl3EQfe_yo/content/tmp_files/load_file.txt +434 -0
- 2tAzT4oBgHgl3EQfDvpk/content/tmp_files/2301.00981v1.pdf.txt +655 -0
- 2tAzT4oBgHgl3EQfDvpk/content/tmp_files/load_file.txt +405 -0
- 2tE1T4oBgHgl3EQflgTe/content/2301.03287v1.pdf +3 -0
- 2tE1T4oBgHgl3EQflgTe/vector_store/index.faiss +3 -0
- 2tE1T4oBgHgl3EQflgTe/vector_store/index.pkl +3 -0
- 3NE3T4oBgHgl3EQfoQom/content/tmp_files/2301.04631v1.pdf.txt +1043 -0
- 3NE3T4oBgHgl3EQfoQom/content/tmp_files/load_file.txt +0 -0
- 4dAzT4oBgHgl3EQfuv2o/content/2301.01696v1.pdf +3 -0
- 4dAzT4oBgHgl3EQfuv2o/vector_store/index.pkl +3 -0
- 5NE4T4oBgHgl3EQfBQty/vector_store/index.pkl +3 -0
- 99AzT4oBgHgl3EQf_P4t/content/2301.01944v1.pdf +3 -0
- 99AzT4oBgHgl3EQf_P4t/vector_store/index.pkl +3 -0
- 9NFST4oBgHgl3EQfaziV/content/tmp_files/load_file.txt +0 -0
- A9AyT4oBgHgl3EQfRvd3/content/2301.00072v1.pdf +3 -0
- A9AyT4oBgHgl3EQfRvd3/vector_store/index.faiss +3 -0
- A9AyT4oBgHgl3EQfRvd3/vector_store/index.pkl +3 -0
- AdE2T4oBgHgl3EQfRQcf/content/2301.03778v1.pdf +3 -0
- AdE2T4oBgHgl3EQfRQcf/vector_store/index.pkl +3 -0
- AtAzT4oBgHgl3EQf__8r/vector_store/index.pkl +3 -0
- AtE3T4oBgHgl3EQfsgvt/content/tmp_files/2301.04669v1.pdf.txt +0 -0
- AtE3T4oBgHgl3EQfsgvt/content/tmp_files/load_file.txt +0 -0
- BNE4T4oBgHgl3EQf5A7u/content/tmp_files/2301.05320v1.pdf.txt +0 -0
- BNE4T4oBgHgl3EQf5A7u/content/tmp_files/load_file.txt +0 -0
- BdE5T4oBgHgl3EQfTA_5/content/2301.05534v1.pdf +3 -0
- BdE5T4oBgHgl3EQfTA_5/vector_store/index.pkl +3 -0
- BtE2T4oBgHgl3EQf8wk1/content/tmp_files/2301.04221v1.pdf.txt +445 -0
- BtE2T4oBgHgl3EQf8wk1/content/tmp_files/load_file.txt +304 -0
- CdE5T4oBgHgl3EQfTw8s/content/2301.05538v1.pdf +3 -0
- EtE4T4oBgHgl3EQf6w5f/content/tmp_files/2301.05334v1.pdf.txt +1685 -0
- EtE4T4oBgHgl3EQf6w5f/content/tmp_files/load_file.txt +0 -0
- G9E1T4oBgHgl3EQfXQT3/vector_store/index.faiss +3 -0
- G9E1T4oBgHgl3EQfXQT3/vector_store/index.pkl +3 -0
- H9FJT4oBgHgl3EQfFSyX/content/2301.11442v1.pdf +3 -0
- H9FJT4oBgHgl3EQfFSyX/vector_store/index.pkl +3 -0
- HtA0T4oBgHgl3EQfB_9Y/vector_store/index.faiss +3 -0
- K9E1T4oBgHgl3EQfYwRv/vector_store/index.faiss +3 -0
- LtE2T4oBgHgl3EQfBAZB/vector_store/index.faiss +3 -0
- LtE3T4oBgHgl3EQfAwm8/content/2301.04261v1.pdf +3 -0
- NNE2T4oBgHgl3EQfqwgI/content/2301.04041v1.pdf +3 -0
- OtAzT4oBgHgl3EQfzf59/vector_store/index.pkl +3 -0
- PNFKT4oBgHgl3EQfgS6S/content/2301.11833v1.pdf +3 -0
.gitattributes
CHANGED
@@ -6352,3 +6352,61 @@ l9E2T4oBgHgl3EQfJAaa/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
 ZtFJT4oBgHgl3EQf7i35/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
 EdE2T4oBgHgl3EQfSgfT/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
 CdE1T4oBgHgl3EQfpwUV/content/2301.03334v1.pdf filter=lfs diff=lfs merge=lfs -text
+hNFKT4oBgHgl3EQfuC7r/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+UNE2T4oBgHgl3EQftQjd/content/2301.04069v1.pdf filter=lfs diff=lfs merge=lfs -text
+ZtAzT4oBgHgl3EQfm_2G/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+kdE4T4oBgHgl3EQftA0V/content/2301.05220v1.pdf filter=lfs diff=lfs merge=lfs -text
+K9E1T4oBgHgl3EQfYwRv/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+4dAzT4oBgHgl3EQfuv2o/content/2301.01696v1.pdf filter=lfs diff=lfs merge=lfs -text
+LtE2T4oBgHgl3EQfBAZB/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+TNE2T4oBgHgl3EQfWwfK/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+NNE2T4oBgHgl3EQfqwgI/content/2301.04041v1.pdf filter=lfs diff=lfs merge=lfs -text
+ttE3T4oBgHgl3EQfNQnH/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+hNFKT4oBgHgl3EQfuC7r/content/2301.11890v1.pdf filter=lfs diff=lfs merge=lfs -text
+qdE5T4oBgHgl3EQflA_S/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+qtFST4oBgHgl3EQfOzjA/content/2301.13753v1.pdf filter=lfs diff=lfs merge=lfs -text
+j9E4T4oBgHgl3EQfTQyV/content/2301.05006v1.pdf filter=lfs diff=lfs merge=lfs -text
+l9E2T4oBgHgl3EQfJAaa/content/2301.03687v1.pdf filter=lfs diff=lfs merge=lfs -text
+XtFOT4oBgHgl3EQf9DQS/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+H9FJT4oBgHgl3EQfFSyX/content/2301.11442v1.pdf filter=lfs diff=lfs merge=lfs -text
+CdE5T4oBgHgl3EQfTw8s/content/2301.05538v1.pdf filter=lfs diff=lfs merge=lfs -text
+m9FKT4oBgHgl3EQfEy02/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+z9E2T4oBgHgl3EQfMwaS/content/2301.03728v1.pdf filter=lfs diff=lfs merge=lfs -text
+Y9E2T4oBgHgl3EQfEgYU/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+z9E2T4oBgHgl3EQfMwaS/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ZNE5T4oBgHgl3EQfDA6r/content/2301.05402v1.pdf filter=lfs diff=lfs merge=lfs -text
+ZNE5T4oBgHgl3EQfDA6r/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+PdE0T4oBgHgl3EQfTwBe/content/2301.02240v1.pdf filter=lfs diff=lfs merge=lfs -text
+rdE5T4oBgHgl3EQflg-S/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+mdFAT4oBgHgl3EQfcR2T/content/2301.08563v1.pdf filter=lfs diff=lfs merge=lfs -text
+PNFKT4oBgHgl3EQfgS6S/content/2301.11833v1.pdf filter=lfs diff=lfs merge=lfs -text
+G9E1T4oBgHgl3EQfXQT3/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+w9FST4oBgHgl3EQfRzg4/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+WtFLT4oBgHgl3EQfTC_4/content/2301.12044v1.pdf filter=lfs diff=lfs merge=lfs -text
+LtE3T4oBgHgl3EQfAwm8/content/2301.04261v1.pdf filter=lfs diff=lfs merge=lfs -text
+r9E4T4oBgHgl3EQfwg0d/content/2301.05250v1.pdf filter=lfs diff=lfs merge=lfs -text
+s9AzT4oBgHgl3EQfPfs9/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+kdE4T4oBgHgl3EQftA0V/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+QtE0T4oBgHgl3EQfkQGy/content/2301.02471v1.pdf filter=lfs diff=lfs merge=lfs -text
+AdE2T4oBgHgl3EQfRQcf/content/2301.03778v1.pdf filter=lfs diff=lfs merge=lfs -text
+Y9E2T4oBgHgl3EQfEgYU/content/2301.03635v1.pdf filter=lfs diff=lfs merge=lfs -text
+99AzT4oBgHgl3EQf_P4t/content/2301.01944v1.pdf filter=lfs diff=lfs merge=lfs -text
+BdE5T4oBgHgl3EQfTA_5/content/2301.05534v1.pdf filter=lfs diff=lfs merge=lfs -text
+PNFKT4oBgHgl3EQfgS6S/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+A9AyT4oBgHgl3EQfRvd3/content/2301.00072v1.pdf filter=lfs diff=lfs merge=lfs -text
+mdFIT4oBgHgl3EQftCvn/content/2301.11338v1.pdf filter=lfs diff=lfs merge=lfs -text
+hNAyT4oBgHgl3EQfxfmy/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+2tE1T4oBgHgl3EQflgTe/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+29E1T4oBgHgl3EQflwQn/content/2301.03288v1.pdf filter=lfs diff=lfs merge=lfs -text
+WtFLT4oBgHgl3EQfTC_4/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+gtE4T4oBgHgl3EQfRwx9/content/2301.04993v1.pdf filter=lfs diff=lfs merge=lfs -text
+V9E1T4oBgHgl3EQfbQST/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+fdE1T4oBgHgl3EQfygXR/content/2301.03435v1.pdf filter=lfs diff=lfs merge=lfs -text
+A9AyT4oBgHgl3EQfRvd3/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+Y9E4T4oBgHgl3EQfOAzo/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+tdE2T4oBgHgl3EQffge_/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+mdFIT4oBgHgl3EQftCvn/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+vNFKT4oBgHgl3EQf3y7o/content/2301.11931v1.pdf filter=lfs diff=lfs merge=lfs -text
+2tE1T4oBgHgl3EQflgTe/content/2301.03287v1.pdf filter=lfs diff=lfs merge=lfs -text
+1tE0T4oBgHgl3EQfuQHv/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+HtA0T4oBgHgl3EQfB_9Y/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
0dE2T4oBgHgl3EQfiQdl/content/tmp_files/2301.03956v1.pdf.txt
ADDED
@@ -0,0 +1,895 @@
Towards High-Definition Maps: a Framework Leveraging Semantic Segmentation to Improve NDT Map Compression and Descriptivity

Petri Manninen1, Heikki Hyyti1, Ville Kyrki2, Jyri Maanpää1, Josef Taher1 and Juha Hyyppä1

Abstract— High-Definition (HD) maps are needed for robust navigation of autonomous vehicles, limited by the on-board storage capacity. To solve this, we propose a novel framework, Environment-Aware Normal Distributions Transform (EA-NDT), that significantly improves compression of the standard NDT map representation. The compressed representation of EA-NDT is based on semantic-aided clustering of point clouds, resulting in more optimal cells compared to the grid cells of standard NDT. To evaluate EA-NDT, we present an open-source implementation that extracts planar and cylindrical primitive features from a point cloud and further divides them into smaller cells to represent the data as an EA-NDT HD map. We collected an open suburban environment dataset and evaluated the EA-NDT HD map representation against the standard NDT representation. Compared to the standard NDT, EA-NDT achieved consistently at least 1.5× higher map compression while maintaining the same descriptive capability. Moreover, we showed that EA-NDT is capable of producing maps with a significantly higher descriptivity score when using the same number of cells as the standard NDT.

I. INTRODUCTION
The current development of mobile robots and the ongoing competition for the crown of autonomous driving have increased the demand for accurate positioning services. Generally, a Global Navigation Satellite System (GNSS) can be used to measure the global position of a mobile robot, but the accuracy of satellite navigation alone is typically around a few meters, and because of signal obstruction the satellite signals may be unavailable [1], [2]. Alternatively, the global position can be solved by fitting the current sensor view into an existing georeferenced map, which can be computed e.g. with Simultaneous Localization and Mapping (SLAM) [3]. Moreover, a map-based technique provides a combined position and rotation estimate, in contrast to a global position measured by GNSS.

Maps used in autonomous driving are typically called High-Definition (HD) maps [4], [5]. Data compression of HD maps is of high importance within many applications that have limited computational resources and storage capacity [6]–[8]. Moreover, real-time localization requires compressed maps to ensure fast processing capability.

*This work was supported by Academy of Finland, decisions 337656, 319011, 318437 and by Henry Ford Foundation Finland.
1P. Manninen, H. Hyyti, J. Maanpää, J. Taher and J. Hyyppä are with the Department of Remote Sensing and Photogrammetry, Finnish Geospatial Research Institute (FGI), National Land Survey of Finland (NLS), 02150 Espoo, Finland. petri.manninen@nls.fi, heikki.hyyti@nls.fi, jyri.maanpaa@nls.fi, josef.taher@nls.fi, juha.hyyppa@nls.fi
2V. Kyrki is with the School of Electrical Engineering, Aalto University, 02150 Espoo, Finland. ville.kyrki@aalto.fi

Fig. 1: An illustration of a point cloud (white) and the corresponding EA-NDT HD map representation. EA-NDT cells are visualized with ellipsoids (mass within a standard deviation) presenting building (yellow), fence (cyan), ground (purple), pole (blue), tree trunk (orange) and traffic sign (red) labels.
Since positioning in real time with raw point clouds is infeasible, alternative methods have been developed to overcome the problem [6], [9]–[12]. One promising approach is the Normal Distributions Transform (NDT) [9]. NDT compresses three-dimensional point cloud data by dividing the cloud into equal-sized cubical cells that are expressed by their mean and covariance. To improve the scan registration, the Semantic-assisted Normal Distributions Transform (SE-NDT) [12] expanded the original NDT with semantic information. However, both NDT and SE-NDT use a grid structure for the division of the point cloud, and therefore cannot find the fundamental geometrical structure of the environment (e.g. boundaries between object surfaces). Consequently, this results in an NDT representation where part of the cells have a high variance in all three dimensions. Magnusson [9] also presented that the point cloud can alternatively be divided by K-means clustering [13] and that this improves the scan registration compared to using a grid structure.
In this work, we address the aforementioned problem of sub-optimal point cloud division. We propose to solve the problem by leveraging semantic-aided clustering. We present a novel framework called Environment-Aware NDT (EA-NDT) (illustrated in Fig. 1), which provides the EA-NDT HD Map representation. The EA-NDT HD Map is a compressed representation of a point cloud that is based on the leaf cell representation of the standard NDT, and therefore the NDT scan registration technique is directly applicable with EA-NDT. In this work, the standard NDT is referred to as NDT. In contrast to the grid structure of NDT, EA-NDT leverages semantic information to cluster planar and cylindrical primitives of the environment to provide a more optimal NDT cell division. In the EA-NDT HD Map, each cell only consists of points that model the same basic geometrical shape, such as a plane or a pole. Moreover, by adding understanding of the semantic information in the scene, we can compute a map containing only stable objects that are useful for accurate localization.

The main contributions of this paper are:
1) A novel data-driven framework to compute an NDT map representation without the grid structure.
2) Demonstration of significantly improved data compression compared to the NDT representation.
3) An open-source implementation1 of the proposed EA-NDT shared with the community.
4) A registered dataset2 to evaluate the proposed EA-NDT on data collected with a Velodyne VLS-128 LiDAR.

1 https://gitlab.com/fgi_nls/public/hd-map
2 https://doi.org/10.5281/zenodo.6796874

The rest of the paper is organized as follows: The next section describes the related work in the fields of HD maps, scan registration, SLAM, and point cloud semantic segmentation. In Section III, we formalize a pipeline architecture of the proposed framework to extract planar and cylindrical primitives of a point cloud. The implementation details of our proof-of-concept solution are explained in Section IV, together with an introduction to the data collection setup and preprocessed dataset, and the evaluation metrics used for the experiment. In Section V, we compare the proposed EA-NDT to a map computed with NDT and show that EA-NDT provides a significant map compression while maintaining the same descriptive capability. Finally, in Section VI we consider the advantages and disadvantages of EA-NDT and provide a discussion of the validity, reliability and generalizability of the experiment.

©2022 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works. DOI: 10.1109/IROS47612.2022.9982050
arXiv:2301.03956v1 [cs.RO] 10 Jan 2023
II. RELATED WORK

HD maps are one of the key techniques to enable autonomous driving [5]. Seif and Hu [4] have recognized three challenges to be solved with an HD map: the localization of the vehicle, reacting to events beyond sight, and driving according to the needs of the traffic. In this work, we focus on the localization task. An HD map can be computed e.g. with SLAM, a well-established and profoundly studied problem of how to align subsequent sensor measurements to incrementally compute a map of the surrounding environment while simultaneously localizing the sensor [14]. In the review by Bresson et al. [14], an accuracy of 10 cm has been reported for the built maps, but even an accuracy of 2 cm is possible.
Data compression is a crucial challenge for HD maps in large environments. For example, the well-known Iterative Closest Point (ICP) [15] algorithm is infeasible in large point clouds due to the computational cost of finding the closest corresponding points across a measurement and a map. To improve the computational problems of ICP, Magnusson proposed Point-to-Distribution NDT (P2D-NDT), in which the reference cloud is divided by a fixed-size 3D grid into cells modelled by the mean and covariance of the points [9]. In P2D-NDT, each point in the registered scan is fitted to the cells within a local neighbourhood of the point. In addition to robust scan registration, the NDT representation provides data compression together with faster registration. Stoyanov et al. presented Distribution-to-Distribution NDT (D2D-NDT), which further develops P2D-NDT to likewise model the registered scan with normal distributions [10].
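As a concrete illustration of this cell representation, the following minimal C++/Eigen sketch bins a cloud into a fixed-size grid and fits a mean and covariance to each occupied cell. The grid hashing and type names are our own illustrative choices, not code from [9]; the 6-point minimum mirrors the threshold used later in Section IV-B.

```cpp
#include <Eigen/Dense>
#include <cmath>
#include <cstdint>
#include <unordered_map>
#include <vector>

struct NdtCell {                 // one normal distribution per occupied grid cell
    Eigen::Vector3d mean;
    Eigen::Matrix3d cov;
};

// Bin points into cubic cells of edge length cell_size and fit a Gaussian to each.
std::unordered_map<int64_t, NdtCell>
buildNdtGrid(const std::vector<Eigen::Vector3d>& points, double cell_size) {
    std::unordered_map<int64_t, std::vector<Eigen::Vector3d>> bins;
    for (const auto& p : points) {
        // Pack the 3D cell index into a single 64-bit key (21 bits per axis).
        const int64_t ix = static_cast<int64_t>(std::floor(p.x() / cell_size));
        const int64_t iy = static_cast<int64_t>(std::floor(p.y() / cell_size));
        const int64_t iz = static_cast<int64_t>(std::floor(p.z() / cell_size));
        const int64_t key = ((ix & 0x1FFFFF) << 42) | ((iy & 0x1FFFFF) << 21)
                          | (iz & 0x1FFFFF);
        bins[key].push_back(p);
    }
    std::unordered_map<int64_t, NdtCell> grid;
    for (const auto& kv : bins) {
        const auto& pts = kv.second;
        if (pts.size() < 6) continue;            // too few points for a stable fit
        Eigen::Vector3d mean = Eigen::Vector3d::Zero();
        for (const auto& p : pts) mean += p;
        mean /= static_cast<double>(pts.size());
        Eigen::Matrix3d cov = Eigen::Matrix3d::Zero();
        for (const auto& p : pts) cov += (p - mean) * (p - mean).transpose();
        cov /= static_cast<double>(pts.size() - 1);  // unbiased sample covariance
        grid[kv.first] = NdtCell{mean, cov};
    }
    return grid;
}
```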
Semantic information can enhance the scan registration performance of NDT. For example, the Semantic-assisted NDT (SE-NDT) [11], proposed by Zaganidis et al., showed that the use of even two semantic labels (edges and planes) can improve the scan registration. To further develop SE-NDT, Zaganidis et al. presented a complete semantic registration pipeline that uses a deep neural network for semantic segmentation of the point cloud [12]. SE-NDT uses the 3D grid cell structure of NDT but models each semantic label separately to utilize the division of similar entities in the registration task. For semantic segmentation, SE-NDT uses PointNet [16], a pioneering point cloud segmentation network that consumes raw point cloud data without voxelization or rendering. Cho et al. proposed that the uncertainty of semantic information could also be used in the registration task [17].

Semantic information can be utilized further than was proposed in the previous works. In this work, we propose to replace the aforementioned grid division with semantic-aided clustering that finds the planar and cylindrical structures of a point cloud. For semantic segmentation, we use Random sampling and an effective Local feature Aggregator Net (RandLA-Net) [18], which presents a new local feature aggregation module to support random sampling, found to be a suitable technique for semantic segmentation of large-scale point clouds. Its authors have reported up to 200× faster processing compared to existing solutions. A further review of semantic segmentation of point cloud data is available e.g. in [19].
III. ENVIRONMENT-AWARE NDT

Here we propose a framework, called Environment-Aware NDT (EA-NDT), to divide a semantically segmented point cloud into NDT cells. The proposed framework is a straight pipeline process consisting of four stages (Fig. 3) that step by step divide the input point cloud into cells, which are ultimately represented as an NDT map. The input of the pipeline is a Registered Point Cloud, which is processed in the following order by stages called Semantic Segmentation, Instance Clustering, Primitive Extraction and Cell Clustering. Finally, the output of the pipeline is an environment-aware NDT-based HD map representation, called the EA-NDT HD Map, which stores the found cells using the NDT representation.

In the Registered Point Cloud, each 3D point has an X, Y, Z Cartesian coordinate (e.g. ETRS-TM35FIN, ECEF) and an intensity value. Semantic Segmentation appends semantic information to each point in the cloud to enable further clustering of the data. In this work, we used road, sidewalk, parking, building, fence, pole, traffic sign, and tree trunk labels to demonstrate the framework, but other labels could also be used. Instance Clustering divides each semantic segment into instances that are spatially separated from each other. Primitive Extraction divides each instance into predefined primitives that can be modeled with a unimodal distribution. In this work, we have defined planar and cylindrical primitives, but the framework could be extended to support new types of primitives. However, large primitives such as trees cannot be modelled well with a unimodal distribution. Therefore, Cell Clustering further divides each primitive into cells of approximately equal size while minimizing the number of used cells. Ultimately, the EA-NDT HD Map is presented as an octree [20] that in this work stores the point counter, point sum and the upper diagonal of the covariance matrix for each cell, but other attributes such as the semantic segment, instance cluster or primitive type could be included.
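The stored per-cell attributes are sufficient statistics from which the normal distribution can be recovered on demand. A minimal sketch of such a leaf payload is given below, assuming double-precision accumulators and an xx, xy, xz, yy, yz, zz ordering of the upper triangle; the exact field layout of the released implementation may differ.

```cpp
#include <Eigen/Dense>
#include <cstdint>

// Per-leaf payload: point counter, point sum and the six upper-triangular
// entries of the unnormalized second-moment matrix sum(p * p^T).
struct LeafCell {
    uint32_t n = 0;                                    // point counter
    Eigen::Vector3d sum = Eigen::Vector3d::Zero();     // point sum
    Eigen::Matrix<double, 6, 1> m2 =                   // order: xx, xy, xz, yy, yz, zz
        Eigen::Matrix<double, 6, 1>::Zero();

    void add(const Eigen::Vector3d& p) {
        ++n;
        sum += p;
        m2[0] += p.x() * p.x(); m2[1] += p.x() * p.y(); m2[2] += p.x() * p.z();
        m2[3] += p.y() * p.y(); m2[4] += p.y() * p.z(); m2[5] += p.z() * p.z();
    }

    Eigen::Vector3d mean() const { return sum / static_cast<double>(n); }

    Eigen::Matrix3d covariance() const {               // valid for n >= 2
        const Eigen::Vector3d mu = mean();
        Eigen::Matrix3d s;
        s << m2[0], m2[1], m2[2],
             m2[1], m2[3], m2[4],
             m2[2], m2[4], m2[5];
        return (s - static_cast<double>(n) * mu * mu.transpose())
               / static_cast<double>(n - 1);
    }
};
```

Accumulating sums rather than the covariance itself keeps the payload small and makes cells cheap to update incrementally when new points arrive.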
IV. METHODS AND EXPERIMENTS

To demonstrate the proposed framework for building an EA-NDT HD Map, we used a dataset collected with a Velodyne VLS-128 Alpha Puck [21] LiDAR on the 7th of September 2020 in a suburban environment in the area of Käpylä in Helsinki, the capital of Finland. The environment in the dataset consists of a straight two-way asphalt street, called Pohjolankatu, which starts from a larger controlled intersection at the crossing of Tuusulanväylä (60.213326° N, 24.942908° E in WGS84) and passes by three smaller uncontrolled intersections until the crossing of Metsolantie (60.215537° N, 24.950065° E). It is a typical suburban street with tram lines, sidewalks, small buildings, traffic signs, light poles, and cars parked on both sides of the street. To collect a reference trajectory and to synchronize the LiDAR measurements, we used a Novatel PwrPak7-E1 GNSS Inertial Navigation System (INS) [22]. The sensors were installed on a Ford Mondeo Hybrid research platform named Autonomous Research Vehicle Observatory (ARVO) [23]. The sensors were interfaced through the Robot Operating System (ROS) [24], version Kinetic Kame, and the sensor measurements were saved in rosbag format for further processing.
A. Preprocessed Dataset

Our open preprocessed dataset2, shown in Fig. 2, consists of a two-way asphalt-paved street with a tram line in both directions and sidewalks on both sides of the street. The length of the dataset trajectory is around 640 m and it has in total more than 40 million points, of which 28 million are used in this work. All the intersections together have plenty of traffic signs. The sidewalks are separated from the road by a row of tall planted trees. The dataset contains nearly 30 buildings that are mostly wooden, and there are several fences between the houses. Our dataset includes all the semantic labels classified by RandLA-Net [18], but in this work we have used only the road, sidewalk, parking, building, fence, tree trunk, traffic sign, and pole labels. In this work, the road, sidewalk, and parking labels were reassigned into a common ground label. The proportion and the number of points of each label are shown in Table I. Half of the used points consist of ground and roughly a fourth represent buildings, whereas poles and traffic signs together represent only 1%. Tree trunks and fences together represent a fifth of the used points. The preprocessing of the data consists of three steps: semantic segmentation, scan registration, and data filtering.

Fig. 2: The complete dataset visualized with semantic labels in different colors: road (magenta), sidewalk (violet), parking (pink), terrain (green), buildings (yellow), fence (light brown), tree trunk (brown), traffic sign (red), pole (grey).

TABLE I: RandLA-Net classified dataset label proportions.

Semantic label      No. of points   % of all   % of used
Ground                 14,052,836       34.7        50.8
Building                7,650,980       18.9        27.7
Tree trunk              3,560,910        8.8        12.9
Fence                   2,120,849        5.2         7.7
Pole                      193,516        0.5         0.7
Traffic sign               82,680        0.2         0.3
Labels used here       27,661,771       68.4       100.0
Others                 12,799,904       31.6
Total                  40,461,675      100.0
In the semantic segmentation of the scans, we used a RandLA-Net model pre-trained with the SemanticKITTI dataset, which was collected with a Velodyne HDL-64 LiDAR [25]. Instead, we used the VLS-128, which has a longer range and 128 laser beams instead of 64 [21]. Also, the VLS-128 has a wider field of view (FOV) in the vertical direction. Consequently, the measurements outside the vertical FOV of the HDL-64 were constantly misclassified, so only the measurements within the HDL-64 FOV were used. RandLA-Net outputs a probability estimate vector of labels for each point; in this work, we call it the label probabilities.

In the scan registration, the motion deformation of each scan was first fixed according to a GNSS-INS trajectory post-processed with Novatel Inertial Explorer [26], after which a P2D-NDT implementation [27] with a 1 m grid cell size was used for registration. In the registration, a local map of the 5 last keyframes was used as the target cloud and a motion threshold of 10 cm was used to add a new keyframe. Grid cells containing points from a single ring of the LiDAR were ignored in the registration. Moreover, points that were considered possibly unreliable (vehicles, bicycles, pedestrians and vegetation) or further than 50 m away from the LiDAR were ignored.

Fig. 3: A visualization of the proposed EA-NDT processing pipeline that is based on the framework in Section III. The input is a semantically segmented point cloud and the intermediate phases before the EA-NDT HD Map are instances, primitives and cells, in which the entities are separated by color. The color mapping of the semantic information is explained in Fig. 1.

After the scan registration, the dense Registered Point Cloud was voxel filtered to average the X, Y, Z position and label probabilities of each 1 cm voxel. To smoothen the semantic segmentation, the label probabilities of each point were averaged within a radius of 5 cm.
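A minimal sketch of the 5 cm label-probability smoothing step is shown below, assuming the probabilities are stored in a per-point array alongside a pcl::PointXYZ cloud; this is our illustrative reading of the step, not the released code.

```cpp
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/kdtree/kdtree_flann.h>
#include <array>
#include <cstddef>
#include <vector>

// Average each point's class-probability vector over its 5 cm neighbourhood.
template <std::size_t NumClasses>
std::vector<std::array<float, NumClasses>>
smoothLabelProbabilities(const pcl::PointCloud<pcl::PointXYZ>::Ptr& cloud,
                         const std::vector<std::array<float, NumClasses>>& probs,
                         double radius = 0.05) {
    pcl::KdTreeFLANN<pcl::PointXYZ> tree;
    tree.setInputCloud(cloud);
    std::vector<std::array<float, NumClasses>> smoothed(probs.size());
    std::vector<int> idx;
    std::vector<float> sq_dist;
    for (std::size_t i = 0; i < cloud->size(); ++i) {
        tree.radiusSearch(cloud->points[i], radius, idx, sq_dist);
        std::array<float, NumClasses> acc{};                    // zero-initialized
        for (int j : idx)
            for (std::size_t c = 0; c < NumClasses; ++c) acc[c] += probs[j][c];
        for (std::size_t c = 0; c < NumClasses; ++c)
            acc[c] /= static_cast<float>(idx.size());           // idx includes i itself
        smoothed[i] = acc;
    }
    return smoothed;
}
```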
B. The Implementation

A method1, based on the framework presented in Section III, was implemented in C++14 on top of ROS Noetic Ninjemys. The main functionality of the implementation uses existing functions and classes of the Point Cloud Library (PCL) [28]. The implemented processing pipeline is demonstrated in Fig. 3. The first stage of the framework, Semantic Segmentation, is explained in Section IV-A; therefore, our dataset already includes the semantic information.

Instance Clustering was implemented with the Euclidean region growing algorithm [29] to divide each semantic segment into the spatially separate instances shown in Fig. 3. In general, we require a distance threshold of 30 cm between the instances and a minimum of 10 points per instance. For the ground label, we require a distance threshold of 50 cm between the instances and a minimum of 3000 points per instance. In our dataset, there is a significant amount of outliers and reflected points below the ground plane that are undesired in a map; Instance Clustering is used to filter those points.
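This stage maps naturally onto PCL's Euclidean cluster extraction, sketched below with the thresholds stated above; whether the released code uses this exact class or a custom region-growing loop is our assumption.

```cpp
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/search/kdtree.h>
#include <pcl/segmentation/extract_clusters.h>
#include <vector>

// Split one semantic segment into spatially separate instances.
std::vector<pcl::PointIndices>
extractInstances(const pcl::PointCloud<pcl::PointXYZ>::Ptr& segment,
                 double tolerance = 0.30,   // 0.50 for the ground label
                 int min_points = 10) {     // 3000 for the ground label
    pcl::search::KdTree<pcl::PointXYZ>::Ptr tree(
        new pcl::search::KdTree<pcl::PointXYZ>);
    tree->setInputCloud(segment);

    std::vector<pcl::PointIndices> instances;
    pcl::EuclideanClusterExtraction<pcl::PointXYZ> ec;
    ec.setClusterTolerance(tolerance);   // max gap between points of one instance
    ec.setMinClusterSize(min_points);    // smaller clusters are dropped as outliers
    ec.setSearchMethod(tree);
    ec.setInputCloud(segment);
    ec.extract(instances);
    return instances;
}
```

Because points that fall into no sufficiently large cluster are simply dropped, the same pass doubles as the outlier filter described above.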
In Primitive Extraction, tree trunk and pole instances are modeled as individual cylindrical primitives, and traffic sign instances as individual planar primitives. Both primitive types are shown in Fig. 3. For the other semantic labels, planar primitives were extracted by a Random Sample Consensus (RANSAC) [30] based normal plane fitting algorithm after subsampling the instance with an averaging 10 cm voxel grid and estimating the point normals for each remaining point from the 26 nearest neighbours. For building and fence instances, a normal distance weight of π/4 and a distance threshold of 15 cm were used for the plane fitting. For ground instances, the procedure differs slightly: 1) an existing implementation [31] of the K-means++ algorithm [32] was used to divide the ground instances into primitives with an area of approximately 100 m² (the initialization of the number of K-means clusters is explained later in this section under Cell Clustering), after which 2) the plane fitting was performed for each primitive with a 30 cm distance threshold for coarse noise filtering.
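The plane fitting step can be sketched with PCL's normal-space RANSAC plane model using the parameters reported above (26-neighbour normals, π/4 normal distance weight, 15 cm threshold). Treat this as an illustrative reconstruction rather than the exact released code; the 10 cm voxel subsampling is assumed to have happened beforehand.

```cpp
#include <pcl/features/normal_3d.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/ModelCoefficients.h>
#include <pcl/sample_consensus/method_types.h>
#include <pcl/sample_consensus/model_types.h>
#include <pcl/search/kdtree.h>
#include <pcl/segmentation/sac_segmentation.h>
#include <cmath>

// Fit one dominant plane to a (voxel-subsampled) instance and return its inliers.
pcl::PointIndices::Ptr
fitNormalPlane(const pcl::PointCloud<pcl::PointXYZ>::Ptr& instance) {
    pcl::search::KdTree<pcl::PointXYZ>::Ptr tree(
        new pcl::search::KdTree<pcl::PointXYZ>);

    // Estimate point normals from the 26 nearest neighbours.
    pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>);
    pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> ne;
    ne.setSearchMethod(tree);
    ne.setKSearch(26);
    ne.setInputCloud(instance);
    ne.compute(*normals);

    // RANSAC plane fit that also penalizes normal disagreement.
    pcl::SACSegmentationFromNormals<pcl::PointXYZ, pcl::Normal> seg;
    seg.setModelType(pcl::SACMODEL_NORMAL_PLANE);
    seg.setMethodType(pcl::SAC_RANSAC);
    seg.setNormalDistanceWeight(M_PI / 4.0);  // weight used for building/fence
    seg.setDistanceThreshold(0.15);           // 15 cm point-to-plane threshold
    seg.setInputCloud(instance);
    seg.setInputNormals(normals);

    pcl::PointIndices::Ptr inliers(new pcl::PointIndices);
    pcl::ModelCoefficients::Ptr coeffs(new pcl::ModelCoefficients);
    seg.segment(*inliers, *coeffs);
    return inliers;
}
```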
In Cell Clustering, the primitives are divided into cells (shown in Fig. 3) with the K-means++ algorithm, for which the number of clusters

    N_L = \lceil f_L \, n_L^{g_L} \rceil    (1)

is initialized for each label L. In (1), \lceil \cdot \rceil is the ceiling operator and n_L is either n_\alpha for cylindrical primitives (tree trunk and pole) or n_\beta for planar primitives (ground, building, fence, and traffic sign):

    n_\alpha = l_\alpha / s_c   and   n_\beta = A_\beta / s_c^2,    (2)

where l_\alpha is the length of a cylindrical primitive, s_c is the required cell size, and A_\beta is the number of points remaining after projecting the planar primitive into the eigenspace found by principal component analysis (PCA) and filtering with a 10 cm voxel grid.

Additionally, after clustering the cells in ground, a plane fitting with a 15 cm threshold is performed for each cell for finer noise filtering. In (1), the scaling parameters f_L and g_L, shown in Table II, were manually fitted for each L over 6 iterations, starting from f_L0 = 1 and g_L0 = 1, until the number of cells N_c for EA-NDT (shown in Fig. 4) was sufficiently close to that of NDT with cell size s_c < 1 m. Regardless of the cell size, each primitive is required to have at least one cell. Fig. 4 reveals how this sets a lower boundary for N_c with larger cells.

TABLE II: The final values of the scaling parameters.

Semantic label (L)    f_L      g_L
Ground                1.680    0.083
Building              2.708    0.137
Tree trunk            4.179    0.318
Fence                 2.248   -0.788
Pole                  1.687   -0.315
Traffic sign          3.923    0.317

Fig. 4: The number of cells N_c after fitting EA-NDT to NDT, shown w.r.t. cell size s_c; color indicates the method, line style the label, and the green background the fitted range.

Fig. 5: The complete map descriptivity score S_d compared w.r.t. the number of cells N_c. The violet line depicts the computation of the NDT compression efficiency η for each S_d.
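Equations (1) and (2) reduce to a few lines of arithmetic, sketched below for a hypothetical building primitive; the example numbers (5,000 remaining points, 1 m cell size) are ours, while f_L and g_L come from Table II.

```cpp
#include <cmath>
#include <cstdio>

// Eq. (1): number of K-means clusters for one primitive of label L.
int numCells(double f_L, double g_L, double n_L) {
    return static_cast<int>(std::ceil(f_L * std::pow(n_L, g_L)));
}

int main() {
    // Hypothetical building primitive: A_beta = 5000 points remain after the
    // PCA projection and 10 cm voxel filter; planar case of eq. (2), s_c = 1 m.
    const double s_c = 1.0;
    const double n_beta = 5000.0 / (s_c * s_c);
    // Table II, building: f_L = 2.708, g_L = 0.137.
    std::printf("N_L = %d\n", numCells(2.708, 0.137, n_beta));  // prints N_L = 9
    return 0;
}
```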
Finally, all the computed cells were stored into an octree structure [20] that represents the EA-NDT HD Map. Each leaf cell in the octree stores a point counter, point sum and the upper diagonal of the covariance matrix for the cell. We require a minimum of 6 points for a leaf cell to be modeled reliably with a normal distribution; hence, cells with fewer points are ignored. The octree implementation in PCL requires a minimum leaf cell size parameter; we used s_c/4 to make it sufficiently smaller than the required cell size of EA-NDT.
C. Evaluation

Here, a descriptivity score S_d, in which a higher score denotes higher similarity, is defined to evaluate how well the map models the raw point cloud. It is derived using the density function of a multivariate normal distribution [33], which is defined for each 3D point x_i and the jth NDT cell as

    f_j(x_i) = \frac{1}{\sqrt{(2\pi)^k |\Sigma_j|}} \exp\left(-\frac{1}{2}(x_i - \mu_j)^T \Sigma_j^{-1} (x_i - \mu_j)\right).    (3)

In (3), \mu_j is the mean vector of a distribution with covariance matrix \Sigma_j for the jth NDT cell, |\cdot| is the determinant operator and k = 3 is the dimension of the multivariate distribution. Restricting to the local neighborhood of each of the N_p points, the descriptivity score S_d is the average density of the best-fitting NDT cells:

    S_d = \frac{1}{N_p} \sum_{i=1}^{N_p} \max_{\|x_i - \mu_j\|_2 \le 2 s_c} f_j(x_i).    (4)

The maximum distance inside a grid cell is \sqrt{3} s_c, and therefore a radius of 2 s_c was considered large enough to contain the highest fit.

Fig. 6: An alternative comparison of the complete map descriptivity score S_d w.r.t. cell size s_c. The violet line depicts the computation of the descriptivity ratio R_d for each s_c and the green background emphasizes the applicable range.
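A direct transcription of (3) and (4) into C++/Eigen is given below as a minimal sketch; the brute-force scan over all cells stands in for whatever spatial index was actually used in the evaluation.

```cpp
#include <Eigen/Dense>
#include <algorithm>
#include <cmath>
#include <vector>

struct NdtCell { Eigen::Vector3d mean; Eigen::Matrix3d cov; };

// Eq. (3): trivariate normal density of point x under cell c (k = 3).
double density(const Eigen::Vector3d& x, const NdtCell& c) {
    const Eigen::Vector3d d = x - c.mean;
    const double md2 = d.dot(c.cov.inverse() * d);   // squared Mahalanobis distance
    const double norm = std::sqrt(std::pow(2.0 * M_PI, 3) * c.cov.determinant());
    return std::exp(-0.5 * md2) / norm;
}

// Eq. (4): average over points of the best density among cells within 2 * s_c.
double descriptivity(const std::vector<Eigen::Vector3d>& points,
                     const std::vector<NdtCell>& cells, double s_c) {
    double sum = 0.0;
    for (const auto& x : points) {
        double best = 0.0;
        for (const auto& c : cells)                  // brute force for clarity
            if ((x - c.mean).norm() <= 2.0 * s_c)
                best = std::max(best, density(x, c));
        sum += best;
    }
    return sum / static_cast<double>(points.size());
}
```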
We have defined two ratios, the descriptivity ratio R_d, requiring s_c^{EA} = s_c^{NDT} (Fig. 6), and the data compression ratio R_c:

    R_d = S_d^{EA} / S_d^{NDT}   and   R_c = N_p \sigma_p / (N_c \sigma_c),    (5)

where the superscripts EA and NDT stand for EA-NDT and NDT, respectively, and \sigma_p and \sigma_c are the data sizes of a point and a cell, respectively. Using (5) while requiring S_d^{EA} = S_d^{NDT}, we define the NDT compression efficiency (Fig. 5)

    \eta = R_c^{EA} / R_c^{NDT} = N_c^{NDT} / N_c^{EA}.    (6)
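For intuition on (5) and (6), the short sketch below plugs in assumed data sizes and hypothetical cell counts (none of these numbers are from the paper) to show how η falls out of the two compression ratios.

```cpp
#include <cstdio>

int main() {
    // Assumed sizes: 16 B per raw point (x, y, z, intensity as floats) and
    // 40 B per cell (uint32 counter, float point sum, six float cov entries).
    const double sigma_p = 16.0, sigma_c = 40.0;
    const double N_p = 28e6;                       // points used in the dataset
    const double N_c_ea = 1.0e6, N_c_ndt = 1.5e6;  // hypothetical cell counts
                                                   // at equal descriptivity S_d
    const double R_c_ea  = N_p * sigma_p / (N_c_ea  * sigma_c);  // eq. (5)
    const double R_c_ndt = N_p * sigma_p / (N_c_ndt * sigma_c);
    std::printf("R_c(EA) = %.1f, R_c(NDT) = %.1f, eta = %.2f\n",
                R_c_ea, R_c_ndt, R_c_ea / R_c_ndt);              // eta = 1.50
    return 0;
}
```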
V. RESULTS

In this section, we evaluate the quality of the EA-NDT and NDT map representations and demonstrate the data compression of EA-NDT. The performance of both methods was evaluated by stepping the cell size from 0.2 m to 10 m with 30 values. Note that the computational time increases exponentially with decreasing cell size. The lower boundary of 0.2 m was selected since it could still be computed overnight. Similarly, the upper boundary of 10 m was considered large enough for this test. In Figs. 6–8, a practically applicable range of 0.5–2.0 m, based on previous work [9], is highlighted.

The evaluation of the complete map representation on the dataset described in Section IV-A is shown in Fig. 5. It shows that the EA-NDT map representation provides a higher descriptivity score for any number of cells (note that the minimum number of cells is limited for EA-NDT, as explained in Section IV-B). However, the results of NDT are typically compared as a function of the cell size, and therefore, in Fig. 6 we present an alternative comparison for which the number of cells in EA-NDT was fitted to NDT as explained in Section IV-B. Likewise, the descriptivity of EA-NDT outperforms NDT at any cell size. By comparing Fig. 5 and Fig. 6, one can note that both plots are equally capable of showing the differences between the compared methods. In general, it can be noticed that the descriptivity score of NDT approaches EA-NDT with smaller cells. However, this is an expected phenomenon of grid cell division; the probability of multiple objects being associated within one cell decreases with smaller cells.

Fig. 7: Comparison of the descriptivity score S_d of each label w.r.t. cell size s_c; line style indicates the method, color the label, and the green background the applicable range.

Fig. 8: Both the NDT compression efficiency η (above) and the descriptivity ratio R_d (below) of the proposed method are visualized for the complete map and all labels w.r.t. NDT cell size s_c; the green background emphasizes the applicable range.
In Table I in Section IV-A, it is shown that around 78.5% of the data consists of points labelled as ground or building, which is reflected in a similar proportion in the number of cells shown in Fig. 4. The descriptivity score of the complete map is dominated by these abundant labels, leaving the effect of the other labels imperceptible. Therefore, in Fig. 7, we present an equivalent descriptivity score comparison separated for each label, which in the case of NDT is equivalent to the SE-NDT representation [11]. Similarly to the comparison of the complete map representation, the EA-NDT descriptivity score of each separate label is higher, except for tree trunks with 3–6 m cells, for which the descriptivity score equals that of NDT. The low descriptivity of EA-NDT there is most likely caused by the use of K-means clustering, because if a cluster is large or the diameter of a trunk is small, a single cluster can contain points from the entire circumference of the trunk, resulting in a non-Gaussian distribution. Moreover, the use of the HDL-64 vertical FOV limits the height of tree trunks and poles to a range of 2.5–3 m (as explained in Section IV-A), and when the required cell size exceeds half of that height, a large portion of the primitives is assigned into one cluster instead of two, causing the observed discontinuity. However, because of the scaling of the number of cells, the effect does not appear exactly at the expected cell size. With the tree trunk and pole labels it is also observable that the descriptivity does not decrease with the largest cells, because the size of the primitive limits the cluster size from increasing.

The descriptivity ratio between EA-NDT and NDT is shown in the lower part of Fig. 8. In general, the descriptivity ratio increases for all the labels towards greater cell sizes; the tree trunk and pole labels are an exception that was already covered above. Within the applicable cell range, the improvement in the descriptivity ratio is more constant for all labels. For the complete map with 2 m cells, the descriptivity is 2× higher compared to NDT, and for 10 m cells the descriptivity is 20× higher. In particular, the building and fence labels show relatively higher descriptivity scores, which suggests that the plane extraction is advantageous.

Map compression is a direct consequence of EA-NDT's higher descriptivity scores; EA-NDT achieves the same descriptivity with a larger cell size, which means a smaller number of cells. The NDT compression efficiency η, visualized in the upper part of Fig. 8, was used to compare the compression of EA-NDT with NDT (note that η, shown in Fig. 5, can be computed only when a corresponding score exists for both methods). For the complete map representation, EA-NDT provides 1.5–1.75× better compression within the whole examined range. The compression of the complete EA-NDT is mainly defined by the ground label, which is about 1.5× better within the whole range. EA-NDT's compression of the traffic sign and pole labels is more than 2.1× higher than NDT for the smallest cells but drops steeply towards greater cell sizes, though it remains higher even for the largest cells. For the building and fence labels, the compression is more than 2.2× higher around a 0.5 m cell size; for smaller and larger cells the NDT compression efficiency decreases. This suggests that EA-NDT's technique of modeling the planes and excluding the other points is beneficial down to a cell size of about 0.5 m, but with smaller cells NDT closes the difference by modeling the excluded points. Finally, we can state that the complete map representation of EA-NDT achieves the highest compression improvement around a 0.7 m cell size, which is also within the applicable cell size range.
VI. DISCUSSION

The proposed EA-NDT achieves 1) at least 1.5× higher compression, and 2) always a higher descriptivity score with the same number of cells compared to NDT, as shown in Fig. 8. For the separately tested semantic labels, EA-NDT achieves 1) always a higher compression, and 2) a higher descriptivity score in the applicable cell size range of 0.5–2.0 m. However, we suggest using cell sizes of 0.5–1.0 m for EA-NDT in a suburban environment, since our results (Fig. 8) indicate that this range provides better compression.

Due to the semantic-aided instance clustering and primitive extraction, the proposed EA-NDT is able to find the most significant planar and cylindrical primitives in the environment. The NDT representation is especially informative within planar and thin cylindrical structures that can be modeled with a unimodal distribution, and therefore EA-NDT is able to model the environment more optimally compared to NDT. Moreover, the use of semantic information enables the selection of the stable objects that should be modeled in the map. Finally, K-means clustering of the primitives ensures data-efficient placement of cells where they are needed.

The advantage of EA-NDT is a result of improved point cloud division. Therefore, the advantage is prominent in small objects such as poles or complicated structures such as buildings or fences. In the ground plane, the advantage is less evident because it is in any case one large plane, and the benefit can be almost completely explained by the removal of outliers and by the efficiency gained from clustering the ground plane.

Finding planar primitives in building and fence instances removes some points which are not modeled by the EA-NDT HD Map. As shown in our results in Fig. 8, this is beneficial for compression with cell sizes above 0.5 m, but for smaller cell sizes it could be beneficial to model those points with additional NDT cells. However, as shown in this work, the described effect is not significant within the range of the suggested cell sizes. Moreover, the suggested correction would be justified only if it also improves the performance of scan registration.

The classification accuracy of the pre-trained RandLA-Net (see Section IV-A) was a limiting factor for the quality of the semantic segmentation. Misclassification increases the total number of cells when overlapping cells of different semantic labels model the same object (see Fig. 1), which reduces the compression of the EA-NDT representation. In future work, the classification accuracy of the semantic segmentation could be improved by using a more advanced model [34], [35] and by retraining the model for the used LiDAR.

The proposed EA-NDT was tested in a suburban environment that consists of 1) flat ground, buildings, fences, and traffic signs, which are modeled as planar surfaces, and 2) poles and tree trunks, which are modeled as cylindrical objects. The tested environment contains enough samples of all the tested semantic labels to demonstrate that EA-NDT is able to compress the data more than NDT. However, our tests did not concern vegetation, tree canopies, water, significant height variations, or high-rise buildings. In the future, a larger variety of environments should be studied.

EA-NDT provides map compression within the tested semantic labels in environments where 1) the used semantic labels exist in the environment, 2) the reliability of the semantic segmentation is high enough, and 3) the instances are separated by a sufficient distance. In order to use the proposed EA-NDT, the following assumptions need to hold: 1) ground, buildings, fences, and traffic signs must be composed of planar surfaces, and 2) tree trunks and poles need to be cylindrical. In future work, for other semantic labels, the type of primitive would need to be defined according to the properties of that label.

Semantic information is a powerful tool and a key enabler of HD maps. In this work, we have shown that semantic information enables separate processing for each semantic label, which results in a more optimal clustering of point cloud data. Furthermore, in previous works semantic information has been used to improve positioning [11], [12], [17]. Moreover, semantic information enables the removal of unwanted dynamic objects from the map. In future work, the use of semantic information opens a possibility to study the positioning accuracy and reliability of different object types over time. That could be especially useful when navigating in constantly changing environments such as arctic areas.

This work focused on evaluating the compression and descriptivity properties of the EA-NDT HD Map, and therefore the positioning performance of the proposed framework remains an open question for future work. Although the positioning was not evaluated, the well-established scan registration and cell representation of NDT are integrated into the positioning of EA-NDT. Moreover, the data compression of an HD map is a desired property of any mobile robot application. Another open question is how the proposed EA-NDT HD map can be efficiently updated with new information. Also, currently the computation of EA-NDT is very slow; the computational optimization is left for future work.
VII. CONCLUSIONS

In this work, we proposed EA-NDT, a novel framework to compute a compressed map representation based on the NDT formulation. The fundamental concept of EA-NDT is semantic-aided clustering to find the planar and cylindrical primitive features of a point cloud and to model them as planar or elongated normal distributions in 3D space, respectively.

We showed that, compared to NDT, the data-driven approach of EA-NDT consistently achieves at least a 1.5× higher map descriptivity score, and therefore enables a significant map compression without deteriorating the descriptive capability of the map. The best compression in comparison to NDT is obtained within cell sizes of 0.5–2 m, which is an applicable range for real-time positioning. Moreover, the results show that, compared to NDT, the representation achieves a higher data compression within all the tested semantic labels, which is a desired property for mobile robots such as autonomous vehicles.

When data compression is a required property of an HD map, we recommend the use of EA-NDT instead of NDT. Based on the results of this work, it seems likely that the positioning accuracy using EA-NDT maps exceeds that of standard NDT maps of the same size. However, this warrants future studies because there are several interacting factors, such as the potentially varying contribution of different semantic labels to the positioning accuracy.

ACKNOWLEDGMENT

The authors would like to thank Paula Litkey and Eero Ahokas from FGI for data management and collection, and Antero Kukko and Harri Kaartinen from FGI for assistance and advice. We would also like to thank Leo Pakola for participation in the research vehicle development.
REFERENCES
[1] A. Zaidi and M. Suddle, "Global navigation satellite systems: a survey," in 2006 International Conference on Advances in Space Technologies, pp. 84–87, 2006.
[2] J. Wang, "Pseudolite applications in positioning and navigation: Progress and problems," Journal of Global Positioning Systems, vol. 1, no. 03, pp. 48–56, 2002.
[3] C. Cadena, L. Carlone, H. Carrillo, Y. Latif, D. Scaramuzza, J. Neira, I. Reid, and J. J. Leonard, "Past, present, and future of simultaneous localization and mapping: Toward the robust-perception age," IEEE Transactions on Robotics, vol. 32, no. 6, pp. 1309–1332, 2016.
[4] H. G. Seif and X. Hu, "Autonomous driving in the iCity—HD maps as a key challenge of the automotive industry," Engineering, vol. 2, no. 2, pp. 159–162, 2016.
[5] R. Liu, J. Wang, and B. Zhang, "High definition map for automated driving: Overview and analysis," The Journal of Navigation, vol. 73, no. 2, pp. 324–341, 2020.
[6] R. Dubé, A. Cramariuc, D. Dugas, H. Sommer, M. Dymczyk, J. Nieto, R. Siegwart, and C. Cadena, "SegMap: Segment-based mapping and localization using data-driven descriptors," The International Journal of Robotics Research, vol. 39, no. 2-3, pp. 339–355, 2020.
[7] H. Yin, Y. Wang, L. Tang, X. Ding, S. Huang, and R. Xiong, "3D LiDAR map compression for efficient localization on resource constrained vehicles," IEEE Transactions on Intelligent Transportation Systems, vol. 22, no. 2, pp. 837–852, 2020.
[8] M.-F. Chang, W. Dong, J. Mangelson, M. Kaess, and S. Lucey, "Map compressibility assessment for LiDAR registration," in 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pp. 5560–5567, 2021.
[9] M. Magnusson, The three-dimensional normal-distributions transform: an efficient representation for registration, surface analysis, and loop detection. PhD thesis, Örebro University, 2009.
[10] T. Stoyanov, M. Magnusson, H. Andreasson, and A. J. Lilienthal, "Fast and accurate scan registration through minimization of the distance between compact 3D NDT representations," The International Journal of Robotics Research, vol. 31, no. 12, pp. 1377–1393, 2012.
[11] A. Zaganidis, M. Magnusson, T. Duckett, and G. Cielniak, "Semantic-assisted 3D normal distributions transform for scan registration in environments with limited structure," in 2017 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pp. 4064–4069, 2017.
[12] A. Zaganidis, L. Sun, T. Duckett, and G. Cielniak, "Integrating deep semantic segmentation into 3-D point cloud registration," IEEE Robotics and Automation Letters, vol. 3, no. 4, pp. 2942–2949, 2018.
[13] C. Sammut and G. I. Webb, Encyclopedia of machine learning and data mining. Springer Publishing Company, Incorporated, 2017.
[14] G. Bresson, Z. Alsayed, L. Yu, and S. Glaser, "Simultaneous localization and mapping: A survey of current trends in autonomous driving," IEEE Transactions on Intelligent Vehicles, vol. 2, no. 3, pp. 194–220, 2017.
[15] P. J. Besl and N. D. McKay, "Method for registration of 3-D shapes," in Sensor Fusion IV: Control Paradigms and Data Structures, vol. 1611, pp. 586–606, 1992.
[16] C. R. Qi, H. Su, K. Mo, and L. J. Guibas, "PointNet: Deep learning on point sets for 3D classification and segmentation," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 652–660, 2017.
[17] S. Cho, C. Kim, J. Park, M. Sunwoo, and K. Jo, "Semantic point cloud mapping of LiDAR based on probabilistic uncertainty modeling for autonomous driving," Sensors, vol. 20, no. 20, p. 5900, 2020.
[18] Q. Hu, B. Yang, L. Xie, S. Rosa, Y. Guo, Z. Wang, N. Trigoni, and A. Markham, "RandLA-Net: Efficient semantic segmentation of large-scale point clouds," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 11108–11117, 2020.
[19] J. Zhang, X. Zhao, Z. Chen, and Z. Lu, "A review of deep learning-based semantic segmentation for point cloud," IEEE Access, vol. 7, pp. 179118–179133, 2019.
[20] J. Kammerl, N. Blodow, R. B. Rusu, S. Gedikli, M. Beetz, and E. Steinbach, "Real-time compression of point cloud streams," in 2012 IEEE International Conference on Robotics and Automation, pp. 778–785, 2012.
[21] Velodyne, VLS-128 Alpha Puck, 2019. 63-9480 Rev-3 datasheet.
[22] Novatel, PwrPak7-E1, October 2020. D18496 Version 7 datasheet.
[23] J. Maanpää, J. Taher, P. Manninen, L. Pakola, I. Melekhov, and J. Hyyppä, "Multimodal end-to-end learning for autonomous steering in adverse road and weather conditions," in 2020 25th International Conference on Pattern Recognition (ICPR), pp. 699–706, 2021.
[24] M. Quigley, B. Gerkey, K. Conley, J. Faust, T. Foote, J. Leibs, E. Berger, R. Wheeler, and A. Ng, "ROS: an open-source robot operating system," in ICRA Workshop on Open Source Software in Robotics, 2009.
[25] J. Behley, M. Garbade, A. Milioto, J. Quenzel, S. Behnke, C. Stachniss, and J. Gall, "SemanticKITTI: A dataset for semantic scene understanding of LiDAR sequences," in Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pp. 9297–9307, 2019.
[26] Novatel, Inertial Explorer, February 2020. D18034 Version 9 brochure.
[27] K. Koide, "OpenMP-boosted normal distributions transform." https://github.com/koide3/ndt_omp, 2017. Referenced 26 January 2022.
[28] R. B. Rusu and S. Cousins, "3D is here: Point cloud library (PCL)," in 2011 IEEE International Conference on Robotics and Automation, 2011.
[29] A. J. Trevor, S. Gedikli, R. B. Rusu, and H. I. Christensen, "Efficient organized point cloud segmentation with connected components," in 3rd Workshop on Semantic Perception, Mapping and Exploration (SPME), 2013.
[30] M. A. Fischler and R. C. Bolles, "Random sample consensus: a paradigm for model fitting with applications to image analysis and automated cartography," Communications of the ACM, vol. 24, no. 6, pp. 381–395, 1981.
[31] N. Sarten, "A generic C++11 k-means clustering implementation." https://github.com/genbattle/dkm, 2015. Referenced 26 January 2022.
[32] D. Arthur and S. Vassilvitskii, "k-means++: The advantages of careful seeding," in SODA '07: Proceedings of the Eighteenth Annual ACM-SIAM Symposium on Discrete Algorithms, pp. 1027–1035, 2007.
[33] S. J. Prince, Computer vision: models, learning, and inference. Cambridge University Press, 2012.
[34] H. Tang, Z. Liu, S. Zhao, Y. Lin, J. Lin, H. Wang, and S. Han, "Searching efficient 3D architectures with sparse point-voxel convolution," in European Conference on Computer Vision, pp. 685–702, 2020.
[35] X. Zhu, H. Zhou, T. Wang, F. Hong, Y. Ma, W. Li, H. Li, and D. Lin, "Cylindrical and asymmetrical 3D convolution networks for LiDAR segmentation," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 9939–9948, 2021.
0dE2T4oBgHgl3EQfiQdl/content/tmp_files/load_file.txt
ADDED
The diff for this file is too large to render. See raw diff

1tE0T4oBgHgl3EQfuQHv/vector_store/index.faiss
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55829734e8880a799b280ace52c650cdedc27f7b438f3845f5aaa0c9debae349
+size 5046317

1tE0T4oBgHgl3EQfuQHv/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6fc48272f6b37ad96892cfa6a1864234586ad656e0c04e27e718cc22a6971620
+size 184595

29E1T4oBgHgl3EQflwQn/content/2301.03288v1.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6b14abc3abf8f8cc6c7904c344fc4dfab4c52aa2d579abb8dc17a5537a7b24e
+size 1933910

29E1T4oBgHgl3EQflwQn/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a81d5dd18fec218494c6771ea9f48608b837c25f53460693541307cf13bf2fb
+size 104835
2NAzT4oBgHgl3EQfe_yo/content/tmp_files/2301.01446v1.pdf.txt
ADDED
@@ -0,0 +1,691 @@
Radio Frequency Fingerprints Extraction for LTE-V2X: A Channel Estimation Based Methodology

Tianshu Chen∗, Hong Shen∗, Aiqun Hu∗†, Weihang He‡, Jie Xu‡, Hongxing Hu§
∗National Mobile Communications Research Laboratory, Southeast University, Nanjing, China
†The Purple Mountain Laboratories for Network and Communication Security, Nanjing, China
‡School of Cyber Science and Engineering, Southeast University, Nanjing, China
§China Automotive Innovation Corporation, Nanjing, China
Email: {iamtianshu, shhseu, aqhu, 220205165, 220205095}@seu.edu.cn, huhongxing@t3caic.com

Abstract—The vehicular-to-everything (V2X) technology has recently drawn considerable attention from both academia and industry. However, the openness of the wireless communication system makes it more vulnerable to identity impersonation and information tampering. How to employ the powerful radio frequency fingerprint (RFF) identification technology in V2X systems turns out to be a vital and also challenging task. In this paper, we propose a novel RFF extraction method for Long Term Evolution-V2X (LTE-V2X) systems. In order to conquer the difficulty of extracting the transmitter RFF in the presence of the wireless channel and receiver noise, we first estimate the wireless channel in a way that excludes the RFF. Then, we remove the impact of the wireless channel based on the channel estimate and obtain initial RFF features. Finally, we conduct RFF denoising to enhance the quality of the initial RFF. Simulation and experiment results both demonstrate that our proposed RFF extraction scheme achieves a high identification accuracy. Furthermore, the performance is also robust to the vehicle speed.

Index Terms—Vehicular-to-everything (V2X), radio frequency fingerprint (RFF), device identification, channel estimation, RFF denoising
I. INTRODUCTION
|
31 |
+
Vehicular-to-everything (V2X) has become a promising
|
32 |
+
technique for intelligent transportation and autonomous driv-
|
33 |
+
ing. In particular, the cellular-V2X (C-V2X) has been widely
|
34 |
+
acknowledged as a key V2X communication standard due to
|
35 |
+
its superior performance [1], [2].
|
36 |
+
Since V2X relies on wireless transmission, the information
|
37 |
+
is easy to be eavesdropped, forged or tampered with, which
|
38 |
+
imposes great challenges on the safety of vehicles, pedestrians
|
39 |
+
and road infrastructures in the V2X communication network
|
40 |
+
[3]. To deal with the security threats faced by wireless
|
41 |
+
communications, there are usually two widely used authen-
|
42 |
+
tication strategies: key-based cryptographic authentication and
|
43 |
+
physical layer security-based non-cryptographic authentication
|
44 |
+
[4]. The cryptographic authentication technology needs to
|
45 |
+
© 2022 IEEE. Personal use of this material is permitted. Permission from
|
46 |
+
IEEE must be obtained for all other uses, in any current or future media,
|
47 |
+
including reprinting/republishing this material for advertising or promotional
|
48 |
+
purposes, creating new collective works, for resale or redistribution to servers
|
49 |
+
or lists, or reuse of any copyrighted component of this work in other works.
|
50 |
+
distribute and manage abundant communication keys, which
|
51 |
+
occupies computing resources and leads to additional overhead
|
52 |
+
and delays. Moreover, with the rapid development of comput-
|
53 |
+
ing capability of the computers, especially the emergence of
|
54 |
+
quantum computers, traditional cryptography technologies are
|
55 |
+
more vulnerable to brute-force attacks [5]. On the contrary, the
|
56 |
+
physical layer security based authentication has lower com-
|
57 |
+
plexity and network overhead with lower latency compared
|
58 |
+
to traditional cryptography-based authentication methods, and
|
59 |
+
can achieve non-perceptual authentication without third-party
|
60 |
+
facilities. One typical example is the radio frequency fin-
|
61 |
+
gerprint (RFF) based authentication, which fully exploits the
|
62 |
+
hardware differences between any two devices. Since the
|
63 |
+
hardware characteristic of each device is unique and difficult
|
64 |
+
to clone, the RFF based authentication can better resist the
|
65 |
+
identity attacks and spoofing [6].
|
66 |
+
In literature, a variety of RFF extraction and identification
|
67 |
+
methods have been advocated. Early works mainly focus on
|
68 |
+
the characteristics of transient signals, such as instantaneous
|
69 |
+
amplitude, frequency, and phase responses [7]. Concerning
|
70 |
+
the steady-state signal, such as preamble signals, researchers
|
71 |
+
consider extracting the RFF features including I/Q offset
|
72 |
+
[8], power spectral density [9], differential constellation trace
|
73 |
+
figure [10]. Furthermore, some universal RFF extraction meth-
|
74 |
+
ods which are independent of data, channel or modulation
|
75 |
+
modes have also been studied. Concretely, Shen et al. [11]
|
76 |
+
constructed channel independent spectrogram and utilized data
|
77 |
+
augmentation for RFF extraction and identification of Lora
|
78 |
+
devices, which achieves good performance under different
|
79 |
+
channel conditions. Alternatively, Yang et al. [12] used random
|
80 |
+
data segments to extract the tap coefficients of the least mean
|
81 |
+
square (LMS) adaptive filter as data independent RFF. Sun et
|
82 |
+
al. [13] verified the locality and inhomogeneity of the RFF
|
83 |
+
distribution with the analysis in the cepstral domain, which
|
84 |
+
yields modulation mode independent RFF.
|
85 |
+
The aforementioned works mainly consider the RFF extrac-
|
86 |
+
tion for low mobility and narrowband systems. However, for
|
87 |
+
the V2X system, the channel typically varies fast due to the
|
88 |
+
high mobility vehicles. In addition, the V2X signal usually
|
89 |
+
arXiv:2301.01446v1 [eess.SP] 4 Jan 2023
|
90 |
+
|
91 |
+
Signal preprocessing
|
92 |
+
RFF feature extraction
|
93 |
+
and denoising
|
94 |
+
Device identification
|
95 |
+
Digital baseband
|
96 |
+
signal
|
97 |
+
Mixer
|
98 |
+
up-conversion
|
99 |
+
Transmitter RFF model
|
100 |
+
RF front-end power amplifier
|
101 |
+
DAC
|
102 |
+
Baseband low-pass filter
|
103 |
+
TX antenna
|
104 |
+
OBU
|
105 |
+
OBU
|
106 |
+
RSU
|
107 |
+
V2P
|
108 |
+
V2I
|
109 |
+
Receiver
|
110 |
+
Channel estimation
|
111 |
+
and equalization
|
112 |
+
RFF identification
|
113 |
+
and access system
|
114 |
+
I/Q DC offset
|
115 |
+
Non-linearity
|
116 |
+
Gain imbalance
|
117 |
+
and phase deviation
|
118 |
+
Frequency response
|
119 |
+
deviation
|
120 |
+
Fig. 1. LTE-V2X RFF extraction and identification system framework and RFF model at the transmitter.
|
121 |
+
has a large bandwidth which is more vulnerable to multipath
|
122 |
+
environment. Therefore, the current RFF extraction methods
|
123 |
+
for narrowband systems such as ZigBee and Lora cannot be
|
124 |
+
directly applied for the V2X system because they do not
|
125 |
+
take into account the impact of multipath and time-varying
|
126 |
+
channels.
|
127 |
+
In this work, we propose a channel estimation based RFF
|
128 |
+
extraction method for Long Term Evolution-V2X (LTE-V2X)
|
129 |
+
systems, which, to the best of our knowledge, has not been
|
130 |
+
investigated in existing works. Specifically, we first estimate
|
131 |
+
the experienced wireless channel using an improved least
|
132 |
+
square (LS) channel estimation method. Then, we perform
|
133 |
+
channel equalization based on the channel estimate to obtain
|
134 |
+
channel dependent RFF. The RFF quality is further enhanced
|
135 |
+
via conducting time-domain denoising. It is worthwhile noting
|
136 |
+
that the developed method eliminates the effect of the channel
|
137 |
+
and the noise on the RFF with low implementation complex-
|
138 |
+
ity, and can be extended to various broadband multi-carrier
|
139 |
+
wireless communication systems.
|
140 |
+
This paper is organized as follows. Section II introduces the
|
141 |
+
system model and signal preprocessing. Section III presents
|
142 |
+
the details of the proposed RFF extraction methodology based
|
143 |
+
on wireless channel estimation. Section IV evaluates the
|
144 |
+
performance of the proposed RFF extraction method through
|
145 |
+
simulations and experiments. Section V concludes this work.
|
146 |
+
II. SYSTEM MODEL AND SIGNAL PREPROCESSING

A. System Model
Fig. 1 demonstrates the framework of the considered LTE-V2X RFF extraction and identification system together with the RFF model at the transmitter. More concretely, one V2X terminal, e.g., an on-board unit (OBU) or a road-side unit (RSU), first transmits data to other devices, where the transmitted signal includes the RFF of the transmitter. Then, the receiver preprocesses the received signal, which consists of converting the RF signal to the baseband signal and performing time-frequency synchronization. Subsequently, the RFF features are extracted based on the synchronized signal, where the effects of the wireless channel and the noise on the RFF need to be mitigated. Finally, the device identification is performed using the extracted RFF features.

It is necessary to note that the considered RFF refers to all the characteristics of the circuits at the transmitter, which, as shown in Fig. 1, include the I/Q DC offsets of the digital-to-analog converter (DAC), the frequency response deviation of the filter, the gain imbalance and the carrier phase quadrature deviation of the mixer, and the non-linearity of the power amplifier [14].
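To make the transmitter RFF model concrete, the following is a minimal NumPy sketch of such an impairment chain, parameterized like Table I below (DC offsets D_I/D_Q, per-branch filters h_I/h_Q, gain imbalance g, phase deviation φ, and a memoryless polynomial power amplifier). This is our own simplified illustration, not the paper's exact front-end model; real hardware impairments are more involved.

```python
import numpy as np

def apply_rff(s, DI, DQ, hI, hQ, g, phi, pa):
    """Apply a simplified transmitter RFF to baseband samples s:
    per-branch filtering, I/Q DC offsets, mixer gain imbalance and
    quadrature phase deviation, and a memoryless polynomial PA."""
    i = np.convolve(s.real, hI, mode="same") + DI  # I branch filter + DC offset
    q = np.convolve(s.imag, hQ, mode="same") + DQ  # Q branch filter + DC offset
    # Gain imbalance g and quadrature phase deviation phi of the mixer
    x = (1 + g) * i + 1j * q * np.exp(1j * phi)
    # Memoryless polynomial PA with coefficients pa = [a1, a2, a3]
    return sum(a * x * np.abs(x) ** k for k, a in enumerate(pa))
```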
B. LTE-V2X PSBCH
We adopt the physical sidelink broadcast channel (PSBCH) in LTE-V2X systems for RFF extraction. According to [15], the PSBCH is transmitted every 160 ms and occupies the central 6 resource blocks (RBs), i.e., 72 subcarriers and 14 single-carrier frequency division multiple access (SC-FDMA) symbols.

[Fig. 2. LTE-V2X PSBCH format: 14 SC-FDMA symbols (1 ms) over 6 RBs, carrying PSSS, SSSS, DMRS, PSBCH, and a guard symbol.]

The detailed format of the PSBCH is shown in Fig. 2, where the primary sidelink synchronization signal (PSSS), the secondary sidelink synchronization signal (SSSS), and the demodulation reference signal (DMRS) all depend on the currently used sidelink synchronization signal (SLSS) ID. Since the SLSS ID can be estimated [15], we can readily obtain the ideal PSSS, SSSS, and DMRS at the receiver, which are used for extracting the transmitter RFF.

C. Signal Preprocessing
In order to ensure the stability of the extracted RFF, the signal preprocessing procedure includes time synchronization as well as carrier frequency offset (CFO) estimation and compensation after the received signal is down-converted from the RF band to the baseband.

The time synchronization is realized by utilizing two identical training symbols, e.g., the two repeated PSSS or SSSS symbols in the LTE-V2X PSBCH, and the cross-correlation between the received signal $r(n)$ and the training signal $x(n)$ as

$$P(d) = \left| \sum_{n=0}^{N-1} r(n+d)\,x^*(n) \right|^2 + \left| \sum_{n=0}^{N-1} r(n+d+N+N_{CP})\,x^*(n) \right|^2, \quad (1)$$

where $N = 2048$ for LTE-V2X systems and $N_{CP}$ denotes the length of the cyclic prefix (CP). When $P(d)$ exceeds a given threshold $P_{TH}$ and reaches its maximum, we obtain the estimated starting position of the training symbol [16], which is expressed by

$$\hat{d} = \arg\max_{d \in \{d \,|\, P(d) > P_{TH}\}} P(d). \quad (2)$$
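As a hedged illustration of (1)–(2), a minimal NumPy sketch of the timing-metric search could look as follows; the function names (`sync_metric`, `coarse_timing`) are ours, and a practical receiver would restrict the search window and normalize the metric.

```python
import numpy as np

def sync_metric(r, x, d, N, N_cp):
    """Timing metric P(d) of Eq. (1): correlate both repeated
    training symbols against the known sequence x."""
    c1 = np.vdot(x, r[d:d + N])                          # sum_n r(n+d) x*(n)
    c2 = np.vdot(x, r[d + N + N_cp:d + 2 * N + N_cp])    # second repetition
    return abs(c1) ** 2 + abs(c2) ** 2

def coarse_timing(r, x, N, N_cp, p_th):
    """Eq. (2): pick the delay with the largest metric above P_TH."""
    D = len(r) - (2 * N + N_cp)
    P = np.array([sync_metric(r, x, d, N, N_cp) for d in range(D)])
    cand = np.flatnonzero(P > p_th)
    return cand[np.argmax(P[cand])] if cand.size else None
```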
Afterwards, the CFO is estimated by performing auto-correlation between the two adjacent identical PSSS symbols and the two identical SSSS symbols [17], which is expressed as

$$\hat{\varepsilon} = \frac{1}{2\pi(N+N_{CP})}\,\mathrm{angle}\Bigg\{ \sum_{n=0}^{N-1} r(n+\hat{d})\,r^*(n+\hat{d}+N+N_{CP}) + \sum_{n=0}^{N-1} r(n+\Delta n+\hat{d})\,r^*(n+\Delta n+\hat{d}+N+N_{CP}) \Bigg\}, \quad (3)$$

where $\mathrm{angle}\{\cdot\}$ returns the phase angle of the input complex number and $\Delta n$ represents the number of sampling points between the first PSSS and the first SSSS. Accordingly, we obtain the CFO compensated signal by

$$y(n) = \tilde{r}(n)\,e^{-j2\pi n\hat{\varepsilon}}, \quad (4)$$

where $\tilde{r}(n)$ denotes the time synchronized signal.

[Fig. 3. The initial (a) and windowed (b) time domain channel estimates of the DMRS symbol.]
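A rough NumPy sketch of the autocorrelation-based CFO estimate and its compensation in (3)–(4) might look as below; `d_hat` and `dn` correspond to d̂ and Δn, and the helper names are ours.

```python
import numpy as np

def estimate_cfo(r, d_hat, dn, N, N_cp):
    """Eq. (3): CFO from the phase drift between identical symbols
    spaced N + N_cp samples apart (one PSSS pair, one SSSS pair)."""
    a = r[d_hat:d_hat + N] * np.conj(r[d_hat + N + N_cp:d_hat + 2 * N + N_cp])
    b = r[d_hat + dn:d_hat + dn + N] * np.conj(
        r[d_hat + dn + N + N_cp:d_hat + dn + 2 * N + N_cp])
    return np.angle(a.sum() + b.sum()) / (2 * np.pi * (N + N_cp))

def compensate_cfo(r_sync, eps_hat):
    """Eq. (4): derotate the time-synchronized signal."""
    n = np.arange(len(r_sync))
    return r_sync * np.exp(-1j * 2 * np.pi * n * eps_hat)
```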
III. PROPOSED RFF EXTRACTION METHOD
In this section, we propose a novel PSBCH based RFF extraction method for LTE-V2X systems, which mainly includes channel estimation, channel equalization, and RFF denoising.

A. Channel Estimation
We adopt the improved LS algorithm [18] for channel estimation. The main idea of the algorithm is to obtain the initial frequency domain channel estimate through the LS algorithm, which is then transformed into the time domain via the inverse discrete Fourier transform (IDFT). Afterwards, we perform time-domain windowing to exclude the noise and the RFF. The resultant signal is finally transformed back into the frequency domain via the discrete Fourier transform (DFT). The detailed steps of channel estimation for the PSBCH subframe are described as follows.

Denote the i-th time-domain SC-FDMA symbol of the received PSBCH after preprocessing and CP removal by $y_i(n)$, which carries both RFF information and channel information. Then, we transform the time-domain received signals corresponding to the PSSS, the SSSS, and the DMRS symbols into the frequency domain by performing the DFT, which is expressed as

$$Y_i(k) = \mathrm{DFT}_N\{y_i(n)\}, \quad 0 \le k \le N-1, \quad (5)$$

where $\mathrm{DFT}_N\{\cdot\}$ denotes the $N$-point DFT and $i = 2, 3, 5, 7, 10, 12, 13$. Denote the frequency domain received signal corresponding to the effective bandwidth occupied by the PSSS, the SSSS, and the DMRS as $\breve{Y}_i(k)$. Then, the initial frequency domain channel estimate of the i-th symbol $\hat{H}_i(k)$, containing the RFF and the noise, is calculated by

$$\hat{H}_i(k) = \frac{\breve{Y}_i(k)}{\breve{X}_i(k)}, \quad k \in \mathcal{N}_i, \quad (6)$$

where $\breve{X}_i(k)$ denotes the PSSS, the SSSS, or the DMRS, and $\mathcal{N}_i$ is defined by

$$\mathcal{N}_i = \begin{cases} [5, 66], & i = 2, 3, 12, 13 \\ [0, 71], & i = 5, 7, 10 \end{cases}. \quad (7)$$

Subsequently, based on $\hat{H}_i(k)$, we obtain the initial time domain channel estimate by

$$\hat{h}_i(n) = \mathrm{IDFT}_{N_i}\{\hat{H}_i(k)\}, \quad n \in \mathcal{N}_i, \quad (8)$$

where $\mathrm{IDFT}_{N_i}\{\cdot\}$ denotes the $N_i$-point IDFT and $N_i$ is defined by

$$N_i = \begin{cases} 62, & i = 2, 3, 12, 13 \\ 72, & i = 5, 7, 10 \end{cases}. \quad (9)$$

Since the channel impulse response is concentrated in a few time domain samples while the noise and the RFF are distributed over the entire time domain, we can apply an appropriate window on $\hat{h}_i(n)$ to obtain an improved time domain channel estimate by

$$\breve{h}_i(n) = \hat{h}_i(n)\,w_i(n), \quad n \in \mathcal{N}_i, \quad (10)$$

where $w_i(n)$ denotes the window function. Fig. 3 illustrates the windowing operation, where a rectangular window is used. Since most of the noise and the RFF is removed by the windowing operation, the resultant channel estimate becomes more accurate.
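The following NumPy sketch mirrors steps (5)–(10) for one symbol, assuming a known reference sequence `x_ref` on the occupied subcarriers `sub_idx` and a rectangular window of `L` taps; the subcarrier bookkeeping of a real LTE-V2X grid (FFT shift, guard bands) is omitted, so treat this as illustrative only.

```python
import numpy as np

def windowed_channel_estimate(y_sym, x_ref, sub_idx, N, L):
    """LS estimate on occupied subcarriers, then IDFT, rectangular
    window of L taps (keeps the channel, drops noise/RFF), and DFT
    back to the frequency domain (Eqs. (5)-(11))."""
    Y = np.fft.fft(y_sym, N)              # Eq. (5)
    H_init = Y[sub_idx] / x_ref           # Eq. (6), per-subcarrier LS
    h = np.fft.ifft(H_init)               # Eq. (8), N_i-point IDFT
    w = np.zeros_like(h)                  # rectangular window w_i(n)
    w[:L] = 1.0
    return np.fft.fft(h * w)              # Eqs. (10)-(11)
```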
After obtaining $\breve{h}_i(n)$, we further acquire the corresponding frequency domain channel estimate as

$$\breve{H}_i(k) = \mathrm{DFT}_{N_i}\{\breve{h}_i(n)\}, \quad k \in \mathcal{N}_i. \quad (11)$$

Considering that the channels experienced by adjacent symbols are approximately identical, especially when the vehicle speed is not very high, we can further average adjacent $\breve{H}_i(k)$'s to suppress the noise, thus improving the channel estimation accuracy. For instance, if the channel variation within one subframe is negligible, the ultimate frequency domain channel estimate can be calculated by

$$\tilde{H}(k) = \begin{cases} \dfrac{\breve{H}_{PSSS}(k) + \breve{H}_{DMRS}(k) + \breve{H}_{SSSS}(k)}{7}, & 5 \le k \le 66 \\[2mm] \dfrac{\breve{H}_{DMRS}(k)}{3}, & \text{otherwise}, \ 0 \le k \le 71 \end{cases}, \quad (12)$$

where

$$\breve{H}_{PSSS}(k) = \breve{H}_2(k) + \breve{H}_3(k), \quad (13)$$
$$\breve{H}_{DMRS}(k) = \breve{H}_5(k) + \breve{H}_7(k) + \breve{H}_{10}(k), \quad (14)$$
$$\breve{H}_{SSSS}(k) = \breve{H}_{12}(k) + \breve{H}_{13}(k). \quad (15)$$
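A minimal sketch of the averaging in (12)–(15), assuming the per-symbol windowed estimates are stored in a dict `H` keyed by symbol index, with PSSS/SSSS estimates covering subcarriers 5–66 (62 bins) and DMRS estimates covering 0–71 (72 bins):

```python
import numpy as np

def average_channel(H):
    """Eqs. (12)-(15): average the windowed per-symbol estimates."""
    H_dmrs = H[5] + H[7] + H[10]        # Eq. (14), 72 bins
    H_psss = H[2] + H[3]                # Eq. (13), 62 bins
    H_ssss = H[12] + H[13]              # Eq. (15), 62 bins
    H_avg = H_dmrs / 3.0                # edge subcarriers: DMRS only
    H_avg[5:67] = (H_psss + H_dmrs[5:67] + H_ssss) / 7.0
    return H_avg
```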
B. Channel Equalization
After acquiring the channel estimate $\tilde{H}(k)$, we can perform channel equalization to remove the channel information and obtain the initial RFF features $R_i(k)$ by

$$R_i(k) = \frac{\breve{Y}_i(k)}{\tilde{H}(k)}, \quad k \in \mathcal{N}_i. \quad (16)$$

Note that the above channel equalization will not lead to a loss of RFF information, since most of the RFF was excluded from the channel estimate by the windowing operation during the channel estimation stage.
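In code, the equalization step (16) is a single element-wise division per symbol; a hedged sketch (names ours):

```python
def equalize(Y_sub, H_avg, sub_idx):
    """Eq. (16): divide a symbol's occupied subcarriers by the
    averaged channel estimate to expose the transmitter RFF."""
    return Y_sub / H_avg[sub_idx]
```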
C. RFF Denoising
According to (16), the initial RFF feature is still affected by the noise in $\breve{Y}_i(k)$. To alleviate the impact of the noise on the extracted RFF, we further average the initial RFFs corresponding to the same data sequence. Specifically, the denoised RFFs for the PSSS, the DMRS, and the SSSS are given by

$$R_{PSSS}(k) = \frac{R_2(k) + R_3(k)}{2}, \quad 5 \le k \le 66, \quad (17)$$

$$R_{DMRS}(k) = \begin{cases} \dfrac{R_5(k) + R_7(k) + R_{10}(k)}{3}, & N_{ID}^{SL} \bmod 2 = 0 \\[2mm] \dfrac{R_5(k) + R_{10}(k)}{2}, & N_{ID}^{SL} \bmod 2 = 1 \end{cases}, \quad 0 \le k \le 71, \quad (18)$$

$$R_{SSSS}(k) = \frac{R_{12}(k) + R_{13}(k)}{2}, \quad 5 \le k \le 66. \quad (19)$$

Note that the DMRS sequence on the 7th symbol differs from those on the 5th and 10th symbols when the SLSS ID $N_{ID}^{SL}$ is odd. Hence, for this case, we only calculate the mean of $R_5(k)$ and $R_{10}(k)$, which share the same data sequence. Finally, we obtain the ultimate RFF features $R(k)$ as

$$R(k) = \begin{cases} R_{DMRS}(k), & 0 \le k \le 4, \ 67 \le k \le 71 \\ \left[ R_{PSSS}(k), R_{DMRS}(k), R_{SSSS}(k) \right], & 5 \le k \le 66 \end{cases}. \quad (20)$$
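A sketch of the denoising and feature assembly in (17)–(20), assuming the equalized symbols `R[i]` follow the same indexing convention as above and `n_id_sl` is the detected SLSS ID (helper names ours):

```python
import numpy as np

def rff_features(R, n_id_sl):
    """Eqs. (17)-(20): average initial RFFs that share the same data
    sequence, then concatenate into the ultimate feature vector."""
    R_psss = (R[2] + R[3]) / 2.0                    # Eq. (17)
    if n_id_sl % 2 == 0:
        R_dmrs = (R[5] + R[7] + R[10]) / 3.0        # Eq. (18), even ID
    else:
        R_dmrs = (R[5] + R[10]) / 2.0               # Eq. (18), odd ID
    R_ssss = (R[12] + R[13]) / 2.0                  # Eq. (19)
    edges = np.r_[R_dmrs[:5], R_dmrs[67:]]          # Eq. (20), edge bins
    middle = np.concatenate([R_psss, R_dmrs[5:67], R_ssss])
    return np.concatenate([edges, middle])
```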
IV. SIMULATION AND EXPERIMENT RESULTS
In the experiment, we employ 10 simulated LTE-V2X terminals with different RFF parameters and 6 actual LTE-V2X modules to generate PSBCH subframes, respectively, and evaluate the classification performance of the different devices based on our proposed RFF extraction scheme.

A. Simulation Verification
For the simulation, we set different RFF parameters for the 10 terminals, including the I/Q DC offsets, the baseband low-pass filter coefficients, the gain imbalance, the phase quadrature deviation, and the RF front-end power amplifier coefficients, which are listed in Table I, to ensure that the modulation domain error vector magnitude (EVM) is within 17.5% [19].

Next, the PSBCH signals carrying the RFFs generated by the 10 terminals pass through the simulated extended typical urban (ETU) multipath channel [20], where the vehicle speed ranges from 0 to 120 km/h. Moreover, the SNR ranges from 0 to 30 dB.

Then, we conduct classification experiments on the 10 terminals using the random forest algorithm. The 700 received PSBCH subframes of each terminal constitute the training set, where the SNR is 30 dB and the vehicle speed is 30 km/h. The test set consists of 300 other subframes from each terminal.
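As a hedged illustration of this classification stage, a scikit-learn random forest over the extracted feature vectors could be set up as below; the dataset variables (`X_train`, `y_train`, ...) and the hyperparameters are placeholders, not values reported in the paper.

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

def classify_terminals(X_train, y_train, X_test, y_test):
    # Complex RFF vectors are split into real/imaginary parts,
    # since tree models expect real-valued features.
    to_real = lambda X: np.hstack([X.real, X.imag])
    clf = RandomForestClassifier(n_estimators=100, random_state=0)
    clf.fit(to_real(X_train), y_train)
    return accuracy_score(y_test, clf.predict(to_real(X_test)))
```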
TABLE I
RFF PARAMETERS OF 10 SIMULATED LTE-V2X TERMINALS

Terminal index | DC offset           | Filter coefficients         | Gain imbalance | Phase deviation | Power amplifier coefficient
1              | DI=0, DQ=0          | hI=[1 0], hQ=[1 0]          | 0.1            | 0.1             | [1 0 0]
2              | DI=0.01, DQ=0       | hI=[1 0], hQ=[1 0]          | 0.01           | 0.01            | [1 0 0]
3              | DI=0, DQ=-0.01      | hI=[1 0], hQ=[1 0]          | 0              | 0               | [1 0 0]
4              | DI=-0.005, DQ=0.005 | hI=[1 0], hQ=[1 0]          | 0.01           | 0.01            | [1 0 0]
5              | DI=0.005, DQ=-0.005 | hI=[1 0], hQ=[1 0]          | 0              | 0               | [1 0 0]
6              | DI=0, DQ=0          | hI=[1 0], hQ=[1 0]          | 0.05           | 0               | [0.9+0.15j 0.1 0.1-0.15j]
7              | DI=0, DQ=0          | hI=[1 0], hQ=[1 0]          | 0              | 0.05            | [1.15 -0.2 0]
8              | DI=0, DQ=0          | hI=[0.825 0], hQ=[1.175 0]  | 0              | 0               | [1 0 0]
9              | DI=0, DQ=0          | hI=[1 0.175], hQ=[1 -0.175] | 0              | 0               | [1 0 0]
10             | DI=0.005, DQ=0      | hI=[0.95 0], hQ=[1 0.05]    | 0.05           | 0.05            | [0.95-0.05j 0 0]
[Fig. 4. Identification accuracy (%) of the 10 simulated LTE-V2X terminals based on the proposed RFF extraction method under different SNRs and different vehicle speeds.]

The identification accuracy of the 10 terminals under different SNRs and different vehicle speeds is depicted in Fig. 4. It can be found that the vehicle speed has little effect on the RFF identification accuracy. When the SNR exceeds 10 dB, the accuracy always remains above 97% regardless of the speed, while the accuracy decreases significantly when the SNR drops below 10 dB, mainly because we only use one PSBCH subframe for RFF extraction. This reveals that the proposed RFF extraction method has excellent classification performance under medium and high SNRs.

Fig. 5 compares the RFF identification performance of the methods with and without channel equalization, where the SNR is 30 dB. When the speed increases from 0 to 120 km/h, there is no obvious loss in accuracy for the channel equalization based method, which always remains over 99%, while the identification accuracy without channel equalization falls rapidly, especially at high speeds. This indicates that our proposed method based on channel estimation can effectively mitigate the impact of wireless channels on the RFF extraction.

[Fig. 5. Comparison of the identification accuracy (%) of the 10 simulated LTE-V2X terminals with and without channel equalization (SNR = 30 dB).]

[Fig. 6. Experiment setup: (a) receiving device (USRP B205); (b) transmitting device (LTE-V2X module).]
B. Experiment Verification
For the experiment, we use 6 LTE-V2X modules to transmit PSBCH subframes and utilize a USRP B205 to receive the signals. The experiment setup is shown in Fig. 6. First, we collect 400 PSBCH subframes for each module as the training set under static and low-speed moving conditions. Subsequently, 100 other subframes are captured from each module as the test set, where the speed ranges from 10 to 30 km/h. The classification accuracy of the 6 LTE-V2X modules is shown in Table II. It can be seen that the average accuracy exceeds 90%. Moreover, the accuracy does not drop significantly as the speed increases. Note that modules 1 to 4 belong to the same type with very similar RFF features; hence, the corresponding classification accuracy is relatively low.
TABLE II
RFF IDENTIFICATION ACCURACY OF 6 LTE-V2X MODULES UNDER DIFFERENT SPEEDS

Device   | 0 km/h | 10 km/h | 20 km/h | 30 km/h
Module 1 | 92%    | 93%     | 90%     | 91%
Module 2 | 69%    | 71%     | 69%     | 68%
Module 3 | 92%    | 90%     | 93%     | 93%
Module 4 | 100%   | 100%    | 100%    | 97%
Module 5 | 100%   | 100%    | 100%    | 100%
Module 6 | 100%   | 100%    | 100%    | 100%
Average  | 92.2%  | 92.3%   | 92%     | 91.5%
V. CONCLUSION
In this paper, we proposed a novel RFF extraction method for LTE-V2X systems. Focusing on the PSSS, the SSSS, and the DMRS of the PSBCH, we successfully obtained highly distinguishable RFF features by performing channel estimation, channel equalization, and RFF denoising. As verified via both simulations and experiments, our method displays robust performance under challenging time-varying and multipath channels. The proposed method can also be applied to any broadband multi-carrier communication system that contains fixed sequences. In future work, more terminals can be tested in practical high mobility channel environments to further verify the effectiveness of this method.
REFERENCES
[1] S. Gyawali, S. Xu, Y. Qian, and R. Q. Hu, "Challenges and solutions for cellular based V2X communications," IEEE Commun. Surveys Tuts., vol. 23, no. 1, pp. 222–255, 1st Quart., 2021.
[2] W. Anwar, N. Franchi, and G. Fettweis, "Physical layer evaluation of V2X communications technologies: 5G NR-V2X, LTE-V2X, IEEE 802.11bd, and IEEE 802.11p," in Proc. IEEE 90th Veh. Technol. Conf. (VTC-Fall), Honolulu, HI, USA, Sept. 2019, pp. 1–7.
[3] C. Wang, Z. Li, X.-G. Xia, J. Shi, J. Si, and Y. Zou, "Physical layer security enhancement using artificial noise in cellular vehicle-to-everything (C-V2X) networks," IEEE Trans. Veh. Technol., vol. 69, no. 12, pp. 15253–15268, Dec. 2020.
[4] X. Luo, Y. Liu, H.-H. Chen, and Q. Guo, "Physical layer security in intelligently connected vehicle networks," IEEE Netw., vol. 34, no. 5, pp. 232–239, Sept./Oct. 2020.
[5] M. Mosca, "Cybersecurity in an era with quantum computers: Will we be ready?" IEEE Security Privacy, vol. 16, no. 5, pp. 38–41, Sept./Oct. 2018.
[6] K. Zeng, K. Govindan, and P. Mohapatra, "Non-cryptographic authentication and identification in wireless networks [security and privacy in emerging wireless networks]," IEEE Wireless Commun., vol. 17, no. 5, pp. 56–62, Oct. 2010.
[7] A. M. Ali, E. Uzundurukan, and A. Kara, "Improvements on transient signal detection for RF fingerprinting," in Proc. 25th Signal Process. Commun. Appl. Conf. (SIU), Antalya, Turkey, May 2017, pp. 1–4.
[8] Y. Shi and M. A. Jensen, "Improved radiometric identification of wireless devices using MIMO transmission," IEEE Trans. Inf. Forensics Security, vol. 6, no. 4, pp. 1346–1354, Dec. 2011.
[9] W. C. Suski II, M. A. Temple, M. J. Mendenhall, and R. F. Mills, "Using spectral fingerprints to improve wireless network security," in Proc. IEEE Global Telecommun. Conf. (GLOBECOM), New Orleans, LO, Nov. 2008, pp. 1–5.
[10] L. Peng, A. Hu, J. Zhang, Y. Jiang, J. Yu, and Y. Yan, "Design of a hybrid RF fingerprint extraction and device classification scheme," IEEE Internet Things J., vol. 6, no. 1, pp. 349–360, Feb. 2019.
[11] G. Shen, J. Zhang, A. Marshall, and J. R. Cavallaro, "Towards scalable and channel-robust radio frequency fingerprint identification for LoRa," IEEE Trans. Inf. Forensics Security, vol. 17, pp. 774–787, Feb. 2022.
[12] Y. Yang, A. Hu, Y. Xing, J. Yu, and Z. Zhang, "A data-independent radio frequency fingerprint extraction scheme," IEEE Wireless Commun. Lett., vol. 10, no. 11, pp. 2524–2527, Nov. 2021.
[13] L. Sun, X. Wang, Z. Huang, and B. Li, "Radio frequency fingerprint extraction based on feature inhomogeneity," IEEE Internet Things J., early access, Feb. 25, 2022, doi: 10.1109/JIOT.2022.3154595.
[14] W. Wang, Z. Sun, S. Piao, B. Zhu, and K. Ren, "Wireless physical-layer identification: Modeling and validation," IEEE Trans. Inf. Forensics Security, vol. 11, no. 9, pp. 2091–2106, Sept. 2016.
[15] ETSI 3rd Generation Partnership Project, "LTE; Evolved Universal Terrestrial Radio Access (E-UTRA); Physical channels and modulation (Release 14)," Sophia Antipolis Cedex, Biarritz, France, 3GPP TS 36.211 version 14.2.0, 2016.
[16] T. M. Schmidl and D. C. Cox, "Robust frequency and timing synchronization for OFDM," IEEE Trans. Commun., vol. 45, no. 12, pp. 1613–1621, Dec. 1997.
[17] J. J. van de Beek and M. Sandell, "ML estimation of time and frequency offset in OFDM systems," IEEE Trans. Signal Process., vol. 45, no. 7, pp. 1800–1805, Jul. 1997.
[18] J. J. van de Beek, O. Edfors, M. Sandell, S. Wilson, and P. Borjesson, "On channel estimation in OFDM systems," in Proc. IEEE 45th Veh. Technol. Conf., vol. 2, Chicago, IL, USA, Jul. 1995, pp. 815–819.
[19] ETSI 3rd Generation Partnership Project, "Technical Specification Group Radio Access Network; Evolved Universal Terrestrial Radio Access (E-UTRA); User Equipment (UE) radio transmission and reception (Release 9)," Sophia Antipolis Cedex, Biarritz, France, 3GPP TS 36.101 version 9.4.0, 2010.
[20] ETSI 3rd Generation Partnership Project, "Technical Specification Group Radio Access Network; Evolved Universal Terrestrial Radio Access (E-UTRA); Base Station (BS) radio transmission and reception (Release 14)," Sophia Antipolis Cedex, Biarritz, France, 3GPP TS 36.104 version 14.3.0, 2017.
2NAzT4oBgHgl3EQfe_yo/content/tmp_files/load_file.txt
ADDED
@@ -0,0 +1,434 @@
1 |
+
filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf,len=433
|
2 |
+
page_content='Radio Frequency Fingerprints Extraction for LTE-V2X: A Channel Estimation Based Methodology Tianshu Chen∗,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
3 |
+
page_content=' Hong Shen∗,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
4 |
+
page_content=' Aiqun Hu∗†,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
5 |
+
page_content=' Weihang He‡,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
6 |
+
page_content=' Jie Xu‡,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
7 |
+
page_content=' Hongxing Hu§ ∗National Mobile Communications Research Laboratory,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
8 |
+
page_content=' Southeast University,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
9 |
+
page_content=' Nanjing,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
10 |
+
page_content=' China †The Purple Mountain Laboratories for Network and Communication Security,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
11 |
+
page_content=' Nanjing,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
12 |
+
page_content=' China ‡School of Cyber Science and Engineering,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
13 |
+
page_content=' Southeast University,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
14 |
+
page_content=' Nanjing,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
15 |
+
page_content=' China §China Automotive Innovation Corporation,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
16 |
+
page_content=' Nanjing,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
17 |
+
page_content=' China Email: {iamtianshu,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
18 |
+
page_content=' shhseu,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
19 |
+
page_content=' aqhu,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
20 |
+
page_content=' 220205165,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
21 |
+
page_content=' 220205095}@seu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
22 |
+
page_content='edu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
23 |
+
page_content='cn, huhongxing@t3caic.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
24 |
+
page_content='com Abstract—The vehicular-to-everything (V2X) technology has recently drawn a number of attentions from both academic and industrial areas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
25 |
+
page_content=' However, the openness of the wireless communi- cation system makes it more vulnerable to identity impersonation and information tampering.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
26 |
+
page_content=' How to employ the powerful radio frequency fingerprint (RFF) identification technology in V2X systems turns out to be a vital and also challenging task.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
27 |
+
page_content=' In this paper, we propose a novel RFF extraction method for Long Term Evolution-V2X (LTE-V2X) systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
28 |
+
page_content=' In order to conquer the difficulty of extracting transmitter RFF in the presence of wireless channel and receiver noise, we first estimate the wireless channel which excludes the RFF.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
29 |
+
page_content=' Then, we remove the impact of the wireless channel based on the channel estimate and obtain initial RFF features.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
30 |
+
page_content=' Finally, we conduct RFF denoising to enhance the quality of the initial RFF.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
31 |
+
page_content=' Simulation and experiment results both demonstrate that our proposed RFF extraction scheme achieves a high identification accuracy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
32 |
+
page_content=' Furthermore, the performance is also robust to the vehicle speed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
33 |
+
page_content=' Index Terms—Vehicular-to-everything (V2X), radio frequency fingerprint (RFF), device identification, channel estimation, RFF denoising I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
34 |
+
page_content=' INTRODUCTION Vehicular-to-everything (V2X) has become a promising technique for intelligent transportation and autonomous driv- ing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
35 |
+
page_content=' In particular, the cellular-V2X (C-V2X) has been widely acknowledged as a key V2X communication standard due to its superior performance [1], [2].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
36 |
+
page_content=' Since V2X relies on wireless transmission, the information is easy to be eavesdropped, forged or tampered with, which imposes great challenges on the safety of vehicles, pedestrians and road infrastructures in the V2X communication network [3].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
37 |
+
page_content=' To deal with the security threats faced by wireless communications, there are usually two widely used authen- tication strategies: key-based cryptographic authentication and physical layer security-based non-cryptographic authentication [4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
38 |
+
page_content=' The cryptographic authentication technology needs to © 2022 IEEE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
39 |
+
page_content=' Personal use of this material is permitted.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
40 |
+
page_content=' Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
41 |
+
page_content=' distribute and manage abundant communication keys, which occupies computing resources and leads to additional overhead and delays.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
42 |
+
page_content=' Moreover, with the rapid development of comput- ing capability of the computers, especially the emergence of quantum computers, traditional cryptography technologies are more vulnerable to brute-force attacks [5].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
43 |
+
page_content=' On the contrary, the physical layer security based authentication has lower com- plexity and network overhead with lower latency compared to traditional cryptography-based authentication methods, and can achieve non-perceptual authentication without third-party facilities.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
44 |
+
page_content=' One typical example is the radio frequency fin- gerprint (RFF) based authentication, which fully exploits the hardware differences between any two devices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
45 |
+
page_content=' Since the hardware characteristic of each device is unique and difficult to clone, the RFF based authentication can better resist the identity attacks and spoofing [6].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
46 |
+
page_content=' In literature, a variety of RFF extraction and identification methods have been advocated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
47 |
+
Early works mainly focused on the characteristics of transient signals, such as instantaneous amplitude, frequency, and phase responses [7]. Concerning steady-state signals, such as preamble signals, researchers have considered extracting RFF features including the I/Q offset [8], the power spectral density [9], and the differential constellation trace figure [10]. Furthermore, some universal RFF extraction methods that are independent of the data, the channel, or the modulation mode have also been studied. Concretely, Shen et al. [11] constructed a channel-independent spectrogram and utilized data augmentation for RFF extraction and identification of LoRa devices, which achieves good performance under different channel conditions. Alternatively, Yang et al. [12] used random data segments to extract the tap coefficients of a least mean square (LMS) adaptive filter as a data-independent RFF. Sun et al. [13] verified the locality and inhomogeneity of the RFF distribution via analysis in the cepstral domain, which yields a modulation-mode-independent RFF.
|
56 |
+
The aforementioned works mainly consider RFF extraction for low-mobility and narrowband systems. However, for the V2X system, the channel typically varies fast due to the high mobility of vehicles. In addition, the V2X signal usually has a large bandwidth, which makes it more vulnerable to multipath environments. Therefore, current RFF extraction methods for narrowband systems such as ZigBee and LoRa cannot be directly applied to the V2X system, because they do not take into account the impact of multipath and time-varying channels.

Fig. 1. LTE-V2X RFF extraction and identification system framework and RFF model at the transmitter.
|
65 |
+
In this work, we propose a channel-estimation-based RFF extraction method for Long Term Evolution-V2X (LTE-V2X) systems, which, to the best of our knowledge, has not been investigated in existing works. Specifically, we first estimate the experienced wireless channel using an improved least square (LS) channel estimation method. Then, we perform channel equalization based on the channel estimate to obtain channel-independent RFF features. The RFF quality is further enhanced by time-domain denoising. It is worth noting that the developed method eliminates the effect of the channel and the noise on the RFF with low implementation complexity, and can be extended to various broadband multi-carrier wireless communication systems.
|
70 |
+
This paper is organized as follows. Section II introduces the system model and signal preprocessing. Section III presents the details of the proposed RFF extraction methodology based on wireless channel estimation. Section IV evaluates the performance of the proposed RFF extraction method through simulations and experiments. Section V concludes this work.
|
75 |
+
II. SYSTEM MODEL AND SIGNAL PREPROCESSING

A. System Model

Fig. 1 demonstrates the framework of the considered LTE-V2X RFF extraction and identification system together with the RFF model at the transmitter. More concretely, one V2X terminal, e.g., an on-board unit (OBU) or a road-side unit (RSU), first transmits data to other devices, where the transmitted signal includes the RFF of the transmitter. Then, the receiver preprocesses the received signal, which consists of converting the RF signal to the baseband signal and performing time-frequency synchronization. Subsequently, the RFF features are extracted based on the synchronized signal, where the effects of the wireless channel and the noise on the RFF need to be mitigated. Finally, device identification is performed using the extracted RFF features. It is necessary to note that the considered RFF refers to all the characteristics of the circuits at the transmitter, which, as shown in Fig. 1, include the I/Q DC offsets of the digital-to-analog converter (DAC), the frequency response deviation of the filter, the gain imbalance and the carrier phase quadrature deviation of the mixer, and the non-linearity of the power amplifier [14].
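To make this impairment chain concrete, the following Python sketch applies the four RFF components to an ideal complex baseband sequence. It is an illustrative model only: the parameter values, the per-rail FIR filters, and the memoryless third-order polynomial amplifier are our assumptions, not the exact models used in this paper.

```python
import numpy as np

def apply_rff(x, d_i=0.005, d_q=-0.005,
              h_i=(1.0, 0.0), h_q=(1.0, 0.05),
              gain_imb=0.05, phase_dev=0.05,
              pa=(0.95 - 0.05j, 0.0, 0.0)):
    """Impose a toy transmitter RFF on ideal baseband samples x."""
    # I/Q DC offsets of the DAC
    i_rail = x.real + d_i
    q_rail = x.imag + d_q
    # Frequency response deviation of the baseband low-pass filters
    i_rail = np.convolve(i_rail, h_i, mode="same")
    q_rail = np.convolve(q_rail, h_q, mode="same")
    # Mixer gain imbalance and carrier phase quadrature deviation
    y = (1.0 + gain_imb) * i_rail + 1j * q_rail * np.exp(1j * phase_dev)
    # Memoryless polynomial non-linearity of the power amplifier
    a1, a2, a3 = pa
    return a1 * y + a2 * y * np.abs(y) + a3 * y * np.abs(y) ** 2
```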
|
87 |
+
B. LTE-V2X PSBCH

We adopt the physical sidelink broadcast channel (PSBCH) in LTE-V2X systems for RFF extraction. According to [15], the PSBCH is transmitted every 160 ms, occupying the central 6 resource blocks (RBs), i.e., 72 subcarriers, and 14 single-carrier frequency division multiple access (SC-FDMA) symbols.

Fig. 2. LTE-V2X PSBCH format (one 1 ms subframe over 6 RBs, whose symbols carry PSBCH, PSSS, SSSS, DMRS, and a guard symbol).

The detailed format of the PSBCH is shown in Fig. 2, where the primary sidelink synchronization signal (PSSS), the secondary sidelink synchronization signal (SSSS), and the demodulation reference signal (DMRS) all depend on the currently used sidelink synchronization signal (SLSS) ID. Since the SLSS ID can be estimated [15], we can readily obtain the ideal PSSS, SSSS, and DMRS at the receiver, which are used for extracting the transmitter RFF.
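For use in the sketches that follow, the PSBCH layout described above can be summarized in a few constants; the 1-based symbol numbering mirrors the indices i = 2, 3, 5, 7, 10, 12, 13 used in Section III, and is our reading of Fig. 2.

```python
# 1-based SC-FDMA symbol indices within one PSBCH subframe (cf. Fig. 2)
PSSS_SYMBOLS = (2, 3)
DMRS_SYMBOLS = (5, 7, 10)
SSSS_SYMBOLS = (12, 13)

N_FFT = 2048   # DFT size used in Section II-C
N_SC = 72      # occupied subcarriers: 6 RBs x 12 subcarriers
```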
|
98 |
+
C. Signal Preprocessing

To ensure the stability of the extracted RFF, the signal preprocessing procedure includes time synchronization as well as carrier frequency offset (CFO) estimation and compensation after the received signal is down-converted from the RF band to the baseband.

Time synchronization is realized by utilizing two identical training symbols, e.g., the two repeated PSSS or SSSS symbols in the LTE-V2X PSBCH, and the cross-correlation between the received signal $r(n)$ and the training signal $x(n)$:

$$P(d) = \left| \sum_{n=0}^{N-1} r(n+d)\,x^{*}(n) \right|^{2} + \left| \sum_{n=0}^{N-1} r(n+d+N+N_{\mathrm{CP}})\,x^{*}(n) \right|^{2}, \quad (1)$$

where $N = 2048$ for LTE-V2X systems and $N_{\mathrm{CP}}$ denotes the length of the cyclic prefix (CP). When $P(d)$ exceeds a given threshold $P_{\mathrm{TH}}$ and reaches its maximum, we obtain the estimated starting position of the training symbol [16]:

$$\hat{d} = \operatorname*{arg\,max}_{d \in \{d \,|\, P(d) > P_{\mathrm{TH}}\}} P(d). \quad (2)$$
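A direct NumPy transcription of the synchronization metric in (1)-(2) is sketched below; the brute-force search over candidate offsets is kept for clarity rather than speed, and the threshold p_th is a free parameter that the paper does not fix.

```python
import numpy as np

def time_sync(r, x, n_cp, p_th):
    """Find the start of two identical training symbols via Eqs. (1)-(2).

    r: received complex baseband samples; x: one ideal training symbol
    of length N; n_cp: CP length in samples; p_th: threshold P_TH.
    """
    N = len(x)
    d_max = len(r) - (2 * N + n_cp)   # last offset keeping both copies in range
    P = np.empty(d_max + 1)
    for d in range(d_max + 1):
        c1 = np.vdot(x, r[d:d + N])                        # sum r(n+d) x*(n)
        c2 = np.vdot(x, r[d + N + n_cp:d + 2 * N + n_cp])  # repeated copy
        P[d] = np.abs(c1) ** 2 + np.abs(c2) ** 2
    above = np.flatnonzero(P > p_th)
    return None if above.size == 0 else int(above[np.argmax(P[above])])
```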
|
105 |
+
Afterwards, the CFO is estimated by performing auto-correlation between the two adjacent identical PSSS symbols and between the two identical SSSS symbols [17]:

$$\hat{\varepsilon} = \frac{1}{2\pi(N+N_{\mathrm{CP}})} \operatorname{angle}\Bigg\{ \sum_{n=0}^{N-1} r(n+\hat{d})\, r^{*}(n+\hat{d}+N+N_{\mathrm{CP}}) + \sum_{n=0}^{N-1} r(n+\Delta n+\hat{d})\, r^{*}(n+\Delta n+\hat{d}+N+N_{\mathrm{CP}}) \Bigg\}, \quad (3)$$

where $\operatorname{angle}\{\cdot\}$ returns the phase angle of the input complex number and $\Delta n$ represents the number of sampling points between the first PSSS and the first SSSS. Accordingly, we obtain the CFO-compensated signal as

$$y(n) = \tilde{r}(n)\, e^{-j 2\pi n \hat{\varepsilon}}, \quad (4)$$

where $\tilde{r}(n)$ denotes the time-synchronized signal.

Fig. 3. The initial and windowed time-domain channel estimates of the DMRS symbol: (a) initial estimate; (b) windowed estimate.
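The estimator in (3) and the de-rotation in (4) translate almost line by line into NumPy; this is a minimal sketch under the paper's notation, with delta_n supplied by the caller.

```python
import numpy as np

def estimate_cfo(r, d_hat, N, n_cp, delta_n):
    """Normalized CFO estimate via the auto-correlations of Eq. (3)."""
    def pair(start):
        a = r[start:start + N]
        b = r[start + N + n_cp:start + 2 * N + n_cp]
        return np.sum(a * np.conj(b))   # sum r(n) r*(n + N + N_CP)
    acc = pair(d_hat) + pair(d_hat + delta_n)
    return np.angle(acc) / (2.0 * np.pi * (N + n_cp))

def compensate_cfo(r_sync, eps_hat):
    """Eq. (4): de-rotate the time-synchronized signal."""
    n = np.arange(len(r_sync))
    return r_sync * np.exp(-1j * 2.0 * np.pi * n * eps_hat)
```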
|
112 |
+
III. PROPOSED RFF EXTRACTION METHOD

In this section, we propose a novel PSBCH-based RFF extraction method for LTE-V2X systems, which mainly includes channel estimation, channel equalization, and RFF denoising.

A. Channel Estimation

We adopt the improved LS algorithm [18] for channel estimation. The main idea of the algorithm is to obtain the initial frequency-domain channel estimate through the LS algorithm, which is then transformed into the time domain via the inverse discrete Fourier transform (IDFT). Afterwards, we perform time-domain windowing to exclude the noise and the RFF. The resultant signal is finally transformed back into the frequency domain via the discrete Fourier transform (DFT). The detailed steps of channel estimation for the PSBCH subframe are described as follows.
|
120 |
+
Denote the $i$-th time-domain SC-FDMA symbol of the received PSBCH after preprocessing and CP removal by $y_i(n)$, which carries both RFF information and channel information. Then, we transform the time-domain received signals corresponding to the PSSS, the SSSS, and the DMRS symbols into the frequency domain by performing the DFT:

$$Y_i(k) = \mathrm{DFT}_N\{y_i(n)\}, \quad 0 \le k \le N-1, \quad (5)$$

where $\mathrm{DFT}_N\{\cdot\}$ denotes the $N$-point DFT and $i = 2, 3, 5, 7, 10, 12, 13$. Denote the frequency-domain received signal corresponding to the effective bandwidth occupied by the PSSS, the SSSS, and the DMRS as $\check{Y}_i(k)$. Then, the initial frequency-domain channel estimate of the $i$-th symbol, $\hat{H}_i(k)$, containing the RFF and the noise, is calculated as

$$\hat{H}_i(k) = \frac{\check{Y}_i(k)}{\check{X}_i(k)}, \quad k \in \mathcal{N}_i, \quad (6)$$

where $\check{X}_i(k)$ denotes the PSSS, the SSSS, or the DMRS, and $\mathcal{N}_i$ is defined by

$$\mathcal{N}_i = \begin{cases} [5, 66], & i = 2, 3, 12, 13 \\ [0, 71], & i = 5, 7, 10 \end{cases}. \quad (7)$$

Subsequently, based on $\hat{H}_i(k)$, we obtain the initial time-domain channel estimate as

$$\hat{h}_i(n) = \mathrm{IDFT}_{N_i}\{\hat{H}_i(k)\}, \quad 0 \le n \le N_i - 1, \quad (8)$$

where $\mathrm{IDFT}_{N_i}\{\cdot\}$ denotes the $N_i$-point IDFT and $N_i$ is defined by

$$N_i = \begin{cases} 62, & i = 2, 3, 12, 13 \\ 72, & i = 5, 7, 10 \end{cases}. \quad (9)$$

Since the channel impulse response is concentrated in a few time-domain samples while the noise and the RFF are distributed over the entire time domain, we can apply an appropriate window to $\hat{h}_i(n)$ to obtain an improved time-domain channel estimate:

$$\breve{h}_i(n) = \hat{h}_i(n)\, w_i(n), \quad 0 \le n \le N_i - 1, \quad (10)$$

where $w_i(n)$ denotes the window function. Fig. 3 illustrates the windowing operation, where a rectangular window is used. Since most of the noise and the RFF is removed by the windowing operation, the resultant channel estimate becomes more accurate. After obtaining $\breve{h}_i(n)$, we further acquire the corresponding frequency-domain channel estimate as

$$\breve{H}_i(k) = \mathrm{DFT}_{N_i}\{\breve{h}_i(n)\}, \quad k \in \mathcal{N}_i. \quad (11)$$

Considering that the channels experienced by adjacent symbols are approximately identical, especially when the vehicle speed is not very high, we can further average adjacent $\breve{H}_i(k)$'s to suppress the noise, thus improving the channel estimation accuracy. For instance, if the channel variation within one subframe is negligible, the ultimate frequency-domain channel estimate can be calculated as

$$\tilde{H}(k) = \begin{cases} \dfrac{\breve{H}_{\mathrm{PSSS}}(k) + \breve{H}_{\mathrm{DMRS}}(k) + \breve{H}_{\mathrm{SSSS}}(k)}{7}, & 5 \le k \le 66 \\[4pt] \dfrac{\breve{H}_{\mathrm{DMRS}}(k)}{3}, & 0 \le k \le 4,\; 67 \le k \le 71 \end{cases}, \quad (12)$$

where

$$\breve{H}_{\mathrm{PSSS}}(k) = \breve{H}_2(k) + \breve{H}_3(k), \quad (13)$$

$$\breve{H}_{\mathrm{DMRS}}(k) = \breve{H}_5(k) + \breve{H}_7(k) + \breve{H}_{10}(k), \quad (14)$$

$$\breve{H}_{\mathrm{SSSS}}(k) = \breve{H}_{12}(k) + \breve{H}_{13}(k). \quad (15)$$
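Steps (6)-(15) amount to an LS divide, an IDFT, a rectangular window, a DFT back, and a weighted average. The sketch below assumes per-symbol inputs already restricted to the effective bandwidth of Eq. (7); the window length is a tuning parameter not fixed by the paper.

```python
import numpy as np

def windowed_ls_estimate(Y_eff, X_ref, win_len):
    """Per-symbol improved LS channel estimate, Eqs. (6)-(11)."""
    H_ls = Y_eff / X_ref              # Eq. (6): initial LS estimate
    h = np.fft.ifft(H_ls)             # Eq. (8): N_i-point IDFT
    w = np.zeros(h.size)
    w[:win_len] = 1.0                 # rectangular window, cf. Fig. 3
    return np.fft.fft(h * w)          # Eqs. (10)-(11)

def average_estimates(H):
    """Eqs. (12)-(15); H maps 1-based symbol index -> windowed estimate.

    PSSS/SSSS entries cover bins 5..66 (length 62); DMRS entries cover
    bins 0..71 (length 72), matching the index sets of Eq. (7).
    """
    H_psss = H[2] + H[3]                       # Eq. (13)
    H_dmrs = H[5] + H[7] + H[10]               # Eq. (14)
    H_ssss = H[12] + H[13]                     # Eq. (15)
    H_avg = H_dmrs / 3.0                       # edge bins: DMRS only
    H_avg[5:67] = (H_psss + H_dmrs[5:67] + H_ssss) / 7.0
    return H_avg
```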
|
132 |
+
B. Channel Equalization

After acquiring the channel estimate $\tilde{H}(k)$, we can perform channel equalization to remove the channel information and obtain the initial RFF features $R_i(k)$:

$$R_i(k) = \frac{\check{Y}_i(k)}{\tilde{H}(k)}, \quad k \in \mathcal{N}_i. \quad (16)$$

Note that the above channel equalization does not cause a loss of RFF information, since most of the RFF has already been excluded from the channel estimate by the windowing operation during the channel estimation stage.
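Equalization itself is then a single element-wise division per symbol, a zero-forcing-style step that leaves the transmitter's RFF in the quotient precisely because the windowing already stripped the RFF from the channel estimate. A minimal sketch:

```python
import numpy as np

def equalize(Y_eff, H_avg, bins):
    """Eq. (16): initial RFF features R_i(k) on the bins of symbol i.

    bins is range(5, 67) for PSSS/SSSS symbols and range(0, 72) for DMRS.
    """
    return Y_eff / H_avg[np.asarray(bins)]
```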
|
134 |
+
C. RFF Denoising

According to (16), the initial RFF features are still affected by the noise in $\check{Y}_i(k)$. To alleviate the impact of the noise on the extracted RFF, we further average the initial RFFs corresponding to the same data sequence. Specifically, the denoised RFFs for the PSSS, the DMRS, and the SSSS are given by

$$R_{\mathrm{PSSS}}(k) = \frac{R_2(k) + R_3(k)}{2}, \quad 5 \le k \le 66, \quad (17)$$

$$R_{\mathrm{DMRS}}(k) = \begin{cases} \dfrac{R_5(k) + R_7(k) + R_{10}(k)}{3}, & N_{\mathrm{ID}}^{\mathrm{SL}} \bmod 2 = 0 \\[4pt] \dfrac{R_5(k) + R_{10}(k)}{2}, & N_{\mathrm{ID}}^{\mathrm{SL}} \bmod 2 = 1 \end{cases}, \quad 0 \le k \le 71, \quad (18)$$

$$R_{\mathrm{SSSS}}(k) = \frac{R_{12}(k) + R_{13}(k)}{2}, \quad 5 \le k \le 66. \quad (19)$$

Note that the DMRS sequence on the 7th symbol differs from those on the 5th and 10th symbols when the SLSS ID $N_{\mathrm{ID}}^{\mathrm{SL}}$ is odd. Hence, for this case, we only average $R_5(k)$ and $R_{10}(k)$, which share the same data sequence. Finally, we obtain the ultimate RFF features $R(k)$ as

$$R(k) = \begin{cases} R_{\mathrm{DMRS}}(k), & 0 \le k \le 4,\; 67 \le k \le 71 \\ \left[ R_{\mathrm{PSSS}}(k),\, R_{\mathrm{DMRS}}(k),\, R_{\mathrm{SSSS}}(k) \right], & 5 \le k \le 66 \end{cases}. \quad (20)$$
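A sketch of the averaging and assembly in (17)-(20) follows; the concatenation order of the final feature vector is our assumption, since (20) only specifies which sub-features cover which bins.

```python
import numpy as np

def denoise_rff(R, n_id_sl):
    """Eqs. (17)-(20): average initial RFFs sharing the same data sequence.

    R maps 1-based symbol index -> R_i(k) from Eq. (16); PSSS/SSSS entries
    have length 62 (bins 5..66), DMRS entries length 72 (bins 0..71).
    """
    R_psss = (R[2] + R[3]) / 2.0                    # Eq. (17)
    if n_id_sl % 2 == 0:                            # Eq. (18)
        R_dmrs = (R[5] + R[7] + R[10]) / 3.0
    else:                                           # odd ID: symbol 7 differs
        R_dmrs = (R[5] + R[10]) / 2.0
    R_ssss = (R[12] + R[13]) / 2.0                  # Eq. (19)
    # Eq. (20): DMRS alone on edge bins; all three on the central bins
    return np.concatenate([R_dmrs[:5], R_dmrs[67:],
                           R_psss, R_dmrs[5:67], R_ssss])
```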
|
142 |
+
IV. SIMULATION AND EXPERIMENT RESULTS

In the experiments, we employ 10 simulated LTE-V2X terminals with different RFF parameters and 6 actual LTE-V2X modules to generate PSBCH subframes, and evaluate the classification performance of different devices based on the proposed RFF extraction scheme.

A. Simulation Verification

For the simulation, we set different RFF parameters for the 10 terminals, including the I/Q DC offsets, the baseband low-pass filter coefficients, the gain imbalance, the phase quadrature deviation, and the RF front-end power amplifier coefficients, as specified in Table I, to ensure that the modulation-domain error vector magnitude (EVM) is within 17.5% [19]. Next, the PSBCH signals carrying the RFFs generated by the 10 terminals pass through a simulated extended typical urban (ETU) multipath channel [20], where the vehicle speed ranges from 0 to 120 km/h. Moreover, the SNR ranges from 0 to 30 dB.
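As a simplified stand-in for the ETU simulation, a static tapped-delay-line channel plus AWGN can be sketched as follows; a full implementation would additionally evolve each tap with Doppler-dependent Rayleigh fading. The ETU tap delays and powers are taken from 3GPP TS 36.101, and the 30.72 MHz sampling rate (matching a 2048-point FFT) is our assumption.

```python
import numpy as np

ETU_DELAYS_NS = np.array([0, 50, 120, 200, 230, 500, 1600, 2300, 5000])
ETU_POWERS_DB = np.array([-1.0, -1.0, -1.0, 0.0, 0.0, 0.0, -3.0, -5.0, -7.0])
FS = 30.72e6  # LTE sampling rate for N = 2048 (assumption)

def etu_channel(x, snr_db, rng=None):
    """Pass x through a static ETU-profile Rayleigh channel plus AWGN."""
    rng = rng or np.random.default_rng()
    tap_idx = np.round(ETU_DELAYS_NS * 1e-9 * FS).astype(int)
    gains = 10.0 ** (ETU_POWERS_DB / 20.0)
    h = np.zeros(tap_idx.max() + 1, dtype=complex)
    # One complex Gaussian draw per tap (a fading snapshot, no Doppler)
    h[tap_idx] = gains * (rng.standard_normal(gains.size)
                          + 1j * rng.standard_normal(gains.size)) / np.sqrt(2)
    y = np.convolve(x, h)[:len(x)]
    noise_pow = np.mean(np.abs(y) ** 2) / 10.0 ** (snr_db / 10.0)
    noise = np.sqrt(noise_pow / 2) * (rng.standard_normal(len(y))
                                      + 1j * rng.standard_normal(len(y)))
    return y + noise
```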
|
148 |
+
Then, we conduct classification experiments on the 10 terminals using the random forest algorithm. The 700 received PSBCH subframes of each terminal constitute the training set, where the SNR is 30 dB and the vehicle speed is 30 km/h. The test set consists of 300 other subframes from each terminal.
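With the features of Eq. (20) flattened into real vectors, the classification stage reduces to a standard supervised split; the real/imaginary stacking and the forest hyper-parameters below are our assumptions, since the paper does not report them.

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

def to_features(rff_vectors):
    """Stack Re/Im parts of complex RFF vectors into a real matrix."""
    Z = np.asarray(rff_vectors)
    return np.hstack([Z.real, Z.imag])

def evaluate(train_rff, y_train, test_rff, y_test):
    """Train a random forest on 700 subframes/terminal, test on 300."""
    clf = RandomForestClassifier(n_estimators=100, random_state=0)
    clf.fit(to_features(train_rff), y_train)
    y_pred = clf.predict(to_features(test_rff))
    return accuracy_score(y_test, y_pred)
```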
|
151 |
+
TABLE I. RFF PARAMETERS OF 10 SIMULATED LTE-V2X TERMINALS

| Terminal index | DC offset             | Filter coefficients          | Gain imbalance | Phase deviation | Power amplifier coefficient |
|----------------|-----------------------|------------------------------|----------------|-----------------|-----------------------------|
| 1              | DI=0, DQ=0            | hI=[1 0], hQ=[1 0]           | 0.1            | 0.1             | [1 0 0]                     |
| 2              | DI=0.01, DQ=0         | hI=[1 0], hQ=[1 0]           | 0.01           | 0.01            | [1 0 0]                     |
| 3              | DI=0, DQ=-0.01        | hI=[1 0], hQ=[1 0]           | 0              | 0               | [1 0 0]                     |
| 4              | DI=-0.005, DQ=0.005   | hI=[1 0], hQ=[1 0]           | 0.01           | 0.01            | [1 0 0]                     |
| 5              | DI=0.005, DQ=-0.005   | hI=[1 0], hQ=[1 0]           | 0              | 0               | [1 0 0]                     |
| 6              | DI=0, DQ=0            | hI=[1 0], hQ=[1 0]           | 0.05           | 0               | [0.9+0.15j 0.1 0.1-0.15j]   |
| 7              | DI=0, DQ=0            | hI=[1 0], hQ=[1 0]           | 0              | 0.05            | [1.15 -0.2 0]               |
| 8              | DI=0, DQ=0            | hI=[0.825 0], hQ=[1.175 0]   | 0              | 0               | [1 0 0]                     |
| 9              | DI=0, DQ=0            | hI=[1 0.175], hQ=[1 -0.175]  | 0              | 0               | [1 0 0]                     |
| 10             | DI=0.005, DQ=0        | hI=[0.95 0], hQ=[1 0.05]     | 0.05           | 0.05            | [0.95-0.05j 0 0]            |

Fig. 4. Identification accuracy of the 10 simulated LTE-V2X terminals based on the proposed RFF extraction method under different SNRs and different vehicle speeds.
|
186 |
+
The identification accuracy of the 10 terminals under different SNRs and different vehicle speeds is depicted in Fig. 4. It can be seen that the vehicle speed has little effect on the RFF identification accuracy. When the SNR exceeds 10 dB, the accuracy remains above 97% regardless of the speed, while the accuracy decreases significantly when the SNR drops below 10 dB, mainly because we only use one PSBCH subframe for RFF extraction. This reveals that the proposed RFF extraction method achieves excellent classification performance under medium and high SNRs.

Fig. 5 compares the RFF identification performance of the methods with and without channel equalization, where the SNR is 30 dB. When the speed increases from 0 to 120 km/h, there is no obvious loss in accuracy for the channel-equalization-based method, which always remains above 99%, while the identification accuracy without channel equalization falls rapidly, especially at high speeds. This indicates that the proposed channel-estimation-based method can effectively mitigate the impact of wireless channels on RFF extraction.

Fig. 5. Comparison of the identification accuracy of the 10 simulated LTE-V2X terminals with and without channel equalization (SNR = 30 dB).
|
197 |
+
Fig. 6. Experiment setup: (a) receiving device (USRP B205); (b) transmitting device (LTE-V2X module).
|
201 |
+
B. Experiment Verification

For the experiment, we use 6 LTE-V2X modules to transmit PSBCH subframes and a USRP B205 to receive the signals. The experiment setup is shown in Fig. 6. First, we collect 400 PSBCH subframes for each module as the training set under static and low-speed moving states. Subsequently, 100 other subframes are captured from each module as the test set, where the speed ranges from 10 to 30 km/h.

TABLE II. RFF IDENTIFICATION ACCURACY OF 6 LTE-V2X MODULES UNDER DIFFERENT SPEEDS

| Device   | 0 km/h | 10 km/h | 20 km/h | 30 km/h |
|----------|--------|---------|---------|---------|
| Module 1 | 92%    | 93%     | 90%     | 91%     |
| Module 2 | 69%    | 71%     | 69%     | 68%     |
| Module 3 | 92%    | 90%     | 93%     | 93%     |
| Module 4 | 100%   | 100%    | 100%    | 97%     |
| Module 5 | 100%   | 100%    | 100%    | 100%    |
| Module 6 | 100%   | 100%    | 100%    | 100%    |
| Average  | 92.2%  | 92.3%   | 92%     | 91.5%   |

The classification accuracy of the 6 LTE-V2X modules is shown in Table II. It can be seen that the average accuracy exceeds 90%. Moreover, the accuracy does not drop significantly as the speed increases. Note that modules 1 to 4 belong to the same type and have very similar RFF features; hence, their classification accuracy is relatively lower.
|
215 |
+
V. CONCLUSION

In this paper, we proposed a novel RFF extraction method for LTE-V2X systems. Focusing on the PSSS, the SSSS, and the DMRS of the PSBCH, we obtained highly distinguishable RFF features by performing channel estimation, channel equalization, and RFF denoising. As verified via both simulations and experiments, the proposed method exhibits robust performance under challenging time-varying and multipath channels. The method can also be applied to any broadband multi-carrier communication system with fixed sequences. In future work, more terminals can be tested in practical high-mobility channel environments to further verify the effectiveness of the method.
|
221 |
+
page_content=' REFERENCES [1] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
222 |
+
page_content=' Gyawali, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
223 |
+
page_content=' Xu, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
224 |
+
page_content=' Qian, and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
225 |
+
page_content=' Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
226 |
+
page_content=' Hu, “Challenges and solutions for cellular based V2X communications,” IEEE Commun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
227 |
+
page_content=' Surveys Tuts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
228 |
+
page_content=', vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
229 |
+
page_content=' 23, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
230 |
+
page_content=' 1, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
231 |
+
page_content=' 222–255, 1st Quart.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
232 |
+
page_content=', 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
233 |
+
page_content=' [2] W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
234 |
+
page_content=' Anwar, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
235 |
+
page_content=' Franchi, and G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
236 |
+
page_content=' Fettweis, “Physical layer evaluation of V2X communications technologies: 5G NR-V2X, LTE-V2X, IEEE 802.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
237 |
+
page_content='11bd, and IEEE 802.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
238 |
+
page_content='11p,” in Proc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
239 |
+
page_content=' IEEE 90th Veh.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
240 |
+
page_content=' Technol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
241 |
+
page_content=' Conf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
242 |
+
page_content=' (VTC-Fall), Honolulu, HI, USA, Sept.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
243 |
+
page_content=' 2019, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
244 |
+
page_content=' 1–7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
245 |
+
page_content=' [3] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
246 |
+
page_content=' Wang, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
247 |
+
page_content=' Li, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
248 |
+
page_content='-G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
249 |
+
page_content=' Xia, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
250 |
+
page_content=' Shi, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
251 |
+
page_content=' Si, and Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
252 |
+
page_content=' Zou, “Physical layer security enhancement using artificial noise in cellular vehicle- to-everything (C-V2X) networks,” IEEE Trans.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
253 |
+
page_content=' Veh.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
254 |
+
page_content=' Technol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
255 |
+
page_content=', vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
256 |
+
page_content=' 69, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
257 |
+
page_content=' 12, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
258 |
+
page_content=' 15 253–15 268, Dec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
259 |
+
page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
260 |
+
page_content=' [4] X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
261 |
+
page_content=' Luo, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
262 |
+
page_content=' Liu, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
263 |
+
page_content='-H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
264 |
+
page_content=' Chen, and Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
265 |
+
page_content=' Guo, “Physical layer security in intelligently connected vehicle networks,” IEEE Netw.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
266 |
+
page_content=', vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
267 |
+
page_content=' 34, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
268 |
+
page_content=' 5, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
269 |
+
page_content=' 232–239, Sept.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
270 |
+
page_content='/Oct.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
271 |
+
page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
272 |
+
page_content=' [5] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
273 |
+
page_content=' Mosca, “Cybersecurity in an era with quantum computers: Will we be ready?”' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
274 |
+
page_content=' IEEE Security Privacy, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
275 |
+
page_content=' 16, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
276 |
+
page_content=' 5, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
277 |
+
page_content=' 38–41, Sept.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
278 |
+
page_content='/Oct.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
279 |
+
page_content=' 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
280 |
+
page_content=' [6] K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
281 |
+
page_content=' Zeng, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
282 |
+
page_content=' Govindan, and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
283 |
+
page_content=' Mohapatra, “Non-cryptographic authen- tication and identification in wireless networks [security and privacy in emerging wireless networks],” IEEE Wireless Commun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
284 |
+
page_content=', vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
285 |
+
page_content=' 17, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
286 |
+
page_content=' 5, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
287 |
+
page_content=' 56–62, Oct.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
288 |
+
page_content=' 2010.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
289 |
+
page_content=' [7] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
290 |
+
page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
291 |
+
page_content=' Ali, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
292 |
+
page_content=' Uzundurukan, and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
293 |
+
page_content=' Kara, “Improvements on transient signal detection for RF fingerprinting,” in Proc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
294 |
+
page_content=' 25th Signal Process.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
295 |
+
page_content=' Commun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
296 |
+
page_content=' Appl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NAzT4oBgHgl3EQfe_yo/content/2301.01446v1.pdf'}
|
297 |
+
2tAzT4oBgHgl3EQfDvpk/content/tmp_files/2301.00981v1.pdf.txt
ADDED
@@ -0,0 +1,655 @@
Transfer Generative Adversarial Networks (T-GAN)-based Terahertz Channel Modeling

Zhengdong Hu, Yuanbo Li, and Chong Han
Terahertz Wireless Communications (TWC) Laboratory, Shanghai Jiao Tong University, China.
Email: {huzhengdong, yuanbo.li, chong.han}@sjtu.edu.cn

arXiv:2301.00981v1 [eess.SP] 3 Jan 2023

Abstract—Terahertz (THz) communications are envisioned as a promising technology for 6G and beyond wireless systems, providing ultra-broad bandwidth and thus Terabit-per-second (Tbps) data rates. However, as the foundation of THz communication system design, channel modeling and characterization are fundamental to scrutinize the potential of the new spectrum. Reliant on physical measurements, traditional statistical channel modeling methods suffer from low accuracy, owing to their assumed distributions and empirical parameters. Moreover, it is time-consuming and expensive to acquire extensive channel measurements in the THz band. In this paper, a transfer generative adversarial network (T-GAN) based modeling method is proposed for the THz band, which exploits the advantage of GAN in modeling complex distributions, and the benefit of transfer learning in transferring knowledge from a source task to improve generalization on a target task with limited training data. Specifically, the proposed GAN is first pre-trained on a simulated dataset generated by the standard channel model from the 3rd Generation Partnership Project (3GPP). Then, by transferring the knowledge and fine-tuning the pre-trained GAN, the T-GAN is developed on a THz measured dataset of small size. Experimental results reveal that the distribution of power delay profiles (PDPs) generated by the proposed T-GAN method shows good agreement with measurement. Moreover, T-GAN achieves good performance in channel modeling, with a 9 dB improvement in root-mean-square error (RMSE) and a higher Structural Similarity Index Measure (SSIM), compared with the traditional 3GPP method.

I. INTRODUCTION
With the exponential growth of the number of interconnected devices, the sixth generation (6G) is expected to achieve intelligent connections of everything, anywhere, anytime [1], which demands Tbit/s wireless data rates. To fulfill this demand, Terahertz (THz) communications gain increasing attention as a vital technology for 6G systems, thanks to the ultra-broad bandwidth ranging from tens of GHz to hundreds of GHz [2]. The THz band is promising to address the spectrum scarcity and capacity limitations of current wireless systems, and to realize long-awaited applications, extending from wireless cognition and localization/positioning to integrated sensing and communication [3].

To design reliable THz wireless systems, one fundamental challenge lies in developing an accurate channel model to portray the propagation phenomena. Due to the high frequencies, new characteristics arise in the THz band, such as frequency-selective absorption loss and rough-surface scattering, which THz channel modeling is required to capture. However, traditional statistical channel modeling methods suffer from low accuracy, owing to their assumed distributions and empirical parameters. For example, a geometry-based stochastic channel model (GSCM) assumes that the positions of scatterers follow certain statistical distributions, such as a uniform distribution within a circle around the transmitters and receivers [4]. However, the positions of scatterers are hard to characterize by such distributions, making the GSCM inaccurate in the THz band. Moreover, it is time-consuming and costly to acquire extensive channel measurements for THz channel modeling. To this end, an accurate channel modeling method that works with limited measurement data is needed for the THz band.

Recently, deep learning (DL) has become popular and widely applied in wireless communications, for tasks such as channel estimation [5], [6] and channel state information (CSI) feedback [7]. Among DL methods, the generative adversarial network (GAN) has the advantage of modeling complex distributions accurately without any statistical assumptions, based on which GAN can be utilized to develop channel models. The authors in [8] train a GAN to approximate the probability density functions (PDFs) of the stochastic channel response. In [9], a GAN is applied to generate synthetic channel samples close to the distribution of real channel samples. The researchers in [10] model the channel with a GAN through channel input-output measurements. In [11], a model-driven GAN-based channel modeling method is developed for an intelligent reflecting surface (IRS) aided communication system. These methods achieve good performance in modeling the channel and demonstrate high consistency between the target channel distribution and the generated channel distribution. However, GAN-based channel modeling has not been exploited in the THz band. Moreover, it is a challenge to train a GAN for channel modeling with scarce THz channel measurement data.

In this paper, a transfer GAN (T-GAN)-based THz channel modeling method is proposed, which can learn the distribution of power delay profiles (PDPs) of the THz channel. Moreover, to tackle the challenge of limited channel measurements in the THz band, the transfer learning technique is introduced in T-GAN, which reduces the size requirement on the channel dataset for training and enhances the performance of channel modeling, by transferring the knowledge stored in a pre-trained model to a new model [12], [13]. Furthermore, the performance of T-GAN in modeling the channel distribution is validated against real measurements [14].

The contributions of this paper are listed as follows.
• We propose a T-GAN based THz channel modeling method, in which a GAN is designed to capture the distribution of PDPs of the THz channel by training on a dataset of PDP samples.
• To tackle the challenge of limited measurement data for THz channel modeling, transfer learning is further exploited by T-GAN, which reduces the size requirement on the training dataset and enhances the performance of the GAN, by transferring the knowledge stored in a pre-trained model to a new model.
The rest of the paper is organized as follows. Sec. II details the proposed T-GAN based channel modeling method. Sec. III demonstrates the performance of the proposed T-GAN method. The paper is concluded in Sec. IV.

Notation: $a$ is a scalar. $\mathbf{a}$ denotes a vector. $\mathbf{A}$ represents a matrix. $\mathbb{E}\{\cdot\}$ describes the expectation. $\nabla$ denotes the gradient operation. $\|\cdot\|$ represents the L2 norm. $\mathbf{I}_N$ defines an $N$-dimensional identity matrix. $\mathcal{N}$ denotes the normal distribution.

II. TRANSFER GAN (T-GAN) BASED CHANNEL MODELING

In this section, the channel modeling problem is first formulated as a channel distribution learning problem. Then, the GAN used in the T-GAN method is elaborated. Finally, the T-GAN is presented.

A. Problem Formulation
The channel impulse response (CIR) can be represented as

    h(\tau) = \sum_{l=0}^{L-1} \alpha_l e^{j\varphi_l} \delta(\tau - \tau_l),    (1)

where $\tau_l$ denotes the delay of the $l$-th multi-path component (MPC), $L$ denotes the number of MPCs, $\alpha_l$ refers to the path gain, and $\varphi_l$ represents the phase of the corresponding MPC.

To characterize the channel, the PDP is an important feature, which indicates the dispersion of power over the time delay, specifically, the received power with respect to the delay in a multi-path channel. It can be extracted from the channel impulse response by

    P(\tau) = |h(\tau)|^2.    (2)

The channel modeling problem is then cast as learning the distribution of PDPs, denoted by $p_r$, which is difficult to represent analytically. Instead, the distribution $p_r$ can be captured by generating fake PDP samples with distribution $p_g$, such that the generated distribution $p_g$ of PDPs matches the actual distribution $p_r$.
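As a minimal sketch of this extraction step (assuming a CIR sampled on a uniform delay grid; the function and variable names are illustrative, not from the paper):

import numpy as np

def pdp_from_cir(h):
    """Power delay profile P(tau) = |h(tau)|^2 of a sampled CIR, per (2)."""
    return np.abs(h) ** 2

# Toy 3-path CIR on a 401-point delay grid (401 matches the PDP length used later).
rng = np.random.default_rng(0)
h = np.zeros(401, dtype=complex)
for delay_bin, gain in [(10, 1.0), (40, 0.5), (120, 0.2)]:
    h[delay_bin] = gain * np.exp(1j * rng.uniform(0.0, 2.0 * np.pi))
pdp = pdp_from_cir(h)  # shape (401,)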
B. Proposed GAN

The GAN can be utilized to learn the distribution of PDPs, denoted by $p_r$, with the framework depicted in Fig. 1. The GAN consists of two sub-networks, namely, the generator and the discriminator. The generator aims at generating fake samples $G(z)$ to fool the discriminator, where $z$ is a noise sample, by mapping the input noise distribution $p_z(z)$ to the generated distribution $p_g = p(G(z))$. The discriminator tries to distinguish between real samples $x$ from $p_r$ and fake samples $G(z)$ from $p_g$, and its outputs $D(x)$ and $D(G(z))$ can be treated as the probability of the input being a real sample.

Fig. 1. Framework of GAN.

The two networks are trained in an adversarial manner, which can be considered a two-player zero-sum minimax game. Specifically, the training objective can be represented by

    \min_G \max_D \; \mathbb{E}_{x \sim p_r}[\log D(x)] + \mathbb{E}_{z \sim p_z}[\log(1 - D(G(z)))],    (3)

where the generator minimizes the probability $1 - D(G(z))$ that the generated sample is detected as fake by the discriminator, while the discriminator maximizes this probability. Therefore, the generator and discriminator compete against each other with opposite objectives during training. Through the adversarial training, a Nash equilibrium can be reached, at which neither the generator nor the discriminator can improve its objective by changing only its own network. Moreover, the global optimum of the training objective is achieved at the equilibrium when $p_g = p_r$. However, training with the objective function in (3) is unstable, since the training objective is potentially not continuous with respect to the generator's parameters [15]. Therefore, an improved version of GAN, namely, the Wasserstein GAN with gradient penalty (WGAN-GP) [15], is adopted. The modified objective function is expressed as

    \min_G \max_D \; \mathbb{E}_{x \sim p_r}[D(x)] + \mathbb{E}_{z \sim p_z}[1 - D(G(z))] + \lambda \mathbb{E}_{\tilde{x}}\big[(\|\nabla_{\tilde{x}} D(\tilde{x})\| - 1)^2\big],    (4)

where the last term is the gradient penalty, which enforces the Lipschitz constraint that the gradient of the discriminator is upper-bounded by a maximum value; the symbol $\tilde{x}$ denotes a point sampled uniformly along the line between $x$ and $G(z)$. Moreover, the parameter $\lambda$ is the penalty coefficient.
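A minimal PyTorch sketch of this gradient penalty term (the paper states only the objective (4); the implementation below follows the standard WGAN-GP recipe of [15], and all names are illustrative):

import torch

def gradient_penalty(D, x_real, x_fake, lam=10.0):
    """lam * E[(||grad D(x_tilde)|| - 1)^2], with x_tilde on the line between x and G(z)."""
    eps = torch.rand(x_real.size(0), 1, device=x_real.device)
    x_tilde = (eps * x_real + (1.0 - eps) * x_fake).requires_grad_(True)
    d_out = D(x_tilde)
    grads = torch.autograd.grad(outputs=d_out, inputs=x_tilde,
                                grad_outputs=torch.ones_like(d_out),
                                create_graph=True)[0]
    return lam * ((grads.norm(2, dim=1) - 1.0) ** 2).mean()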
After introducing the framework of the GAN, the detailed architecture of the proposed GAN network is presented. The structures of the generator G and discriminator D are depicted in Fig. 2, where the number in brackets denotes the dimension. The input to the generator is a noise vector $z$ of dimension $n_z = 100$, sampled from the probability density function $\mathcal{N}(0, \sigma^2 \mathbf{I}_{n_z})$. The generator consists of five dense layers, with 128, 128, 128, 128, and 401 neurons, respectively. It is worth noting that the size of the output layer is equal to the size of a PDP.

Fig. 2. Structure of generator and discriminator.

The activation function of the first four dense layers is the LeakyReLU function, which speeds up convergence and avoids the gradient vanishing problem. The LeakyReLU function is expressed as

    f(x) = \begin{cases} x, & \text{if } x \ge 0 \\ \alpha x, & \text{if } x < 0 \end{cases},    (5)

where $\alpha$ is the slope coefficient applied when the value of the neuron $x$ is negative. In addition to the LeakyReLU function, a Sigmoid function is utilized in the last layer, which maps the output to the range $[0, 1]$. The Sigmoid function is defined as

    f(x) = \frac{1}{1 + e^{-x}}.    (6)

After passing through the dense layers and activation functions of the generator, the input noise vectors are transformed into generated samples. The generated samples, together with real samples, are then passed to the discriminator.

The discriminator is designed to distinguish between generated samples and real samples. The numbers of neurons in the five dense layers of the discriminator are 512, 256, 128, 64, and 1, respectively. The activation function chosen for the first four layers is the LeakyReLU function introduced above. The activation function of the output layer is a linear activation function, which is dictated by the objective function of WGAN-GP introduced in (4).
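This architecture can be summarized in PyTorch roughly as follows (a sketch under the stated layer sizes; the LeakyReLU slope of 0.2 is an assumption, since the paper does not report $\alpha$):

import torch.nn as nn

def mlp(sizes, out_act):
    """Dense stack: LeakyReLU after every hidden layer, out_act after the last."""
    layers = []
    for i in range(len(sizes) - 2):
        layers += [nn.Linear(sizes[i], sizes[i + 1]), nn.LeakyReLU(0.2)]
    layers += [nn.Linear(sizes[-2], sizes[-1]), out_act]
    return nn.Sequential(*layers)

# Generator: noise (100) -> four hidden layers of 128 -> PDP (401), Sigmoid into [0, 1].
G = mlp([100, 128, 128, 128, 128, 401], nn.Sigmoid())
# Discriminator: PDP (401) -> 512 -> 256 -> 128 -> 64 -> 1, linear output for WGAN-GP.
D = mlp([401, 512, 256, 128, 64, 1], nn.Identity())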
C. Proposed T-GAN

The framework of the proposed T-GAN is depicted in Fig. 3, in which transfer learning is conducted between the measurement and the 3GPP TR 38.901 model [16]. The measured PDPs denote the PDPs extracted from measurement, available only in a small quantity, while the simulated PDPs refer to the PDPs simulated by the 3GPP model, which is implemented with the statistics extracted from measurement. The proposed GAN and T-GAN, which share the same network structure, are trained on the simulated PDPs and the measured PDPs, respectively, to capture the distribution of PDPs. Since the set of measured PDPs is quite small for the training of T-GAN, which can cause difficulty in converging or over-fitting, transfer learning is exploited to tackle these problems.

Fig. 3. Framework for T-GAN.

To describe transfer learning formally, a domain denoted by $\mathcal{D}$ consists of a feature space $\mathcal{X}$ and a marginal probability distribution $P(X)$ defined on $X = \{x_1, x_2, \cdots, x_N\} \in \mathcal{X}$, where $N$ is the number of feature vectors in $X$. As depicted in Fig. 3, the target domain $\mathcal{D}_t$ and source domain $\mathcal{D}_s$ are defined on the measurement and the 3GPP model, respectively. The feature spaces of the two domains are both constructed from PDPs, with different marginal probability distributions defined on the measured PDPs $X_t$ and the simulated PDPs $X_s$.

Moreover, given a domain $\mathcal{D}(\mathcal{X}, P(X))$, a task denoted by $\mathcal{T}$ is defined by a label space $\mathcal{L}$ and a predictive function $f(\cdot)$, where the predictive function is learned from the pairs $(x_n, l_n)$ with $x_n \in \mathcal{X}$ and $l_n \in \mathcal{L}$. In the target domain $\mathcal{D}_t$ and source domain $\mathcal{D}_s$, the tasks are the same, namely to capture the distribution of PDPs, and the label space is $\mathcal{L} = \{0, 1\}$, representing whether a PDP sample is generated by the proposed GAN or drawn from the training dataset. The T-GAN and GAN serve as the predictive functions $f_t$ and $f_s$. Transfer learning is then aimed at learning the function $f_t$ in the target domain $\mathcal{D}_t$ with the knowledge of $\mathcal{T}_s$ in the source domain $\mathcal{D}_s$, i.e., transferring the knowledge stored in the GAN trained on simulated PDPs to the T-GAN trained on the measured PDPs.

The method of fine-tuning [13] is adopted for the transfer learning. The T-GAN is initialized with the weights of the GAN trained on the simulated PDPs, and is then fine-tuned on the small set of measured PDPs. It is worth noting that both the generator and the discriminator of the GAN are transferred, which yields better performance in generating high-quality samples and faster convergence, compared with transferring only the generator or only the discriminator [13].
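A minimal sketch of this fine-tuning step in PyTorch (the checkpoint file names are placeholders, not from the paper):

import torch

# Initialize T-GAN with the weights of the GAN pre-trained on simulated PDPs;
# both the generator and the discriminator are transferred.
G.load_state_dict(torch.load("gan_generator_simulated.pt"))
D.load_state_dict(torch.load("gan_discriminator_simulated.pt"))
# The same WGAN-GP training loop is then run on the small measured-PDP dataset.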
With transfer learning, the performance of T-GAN can be largely enhanced. Specifically, the channel statistics extracted for the 3GPP method are captured by the proposed GAN trained on simulated PDPs, and are then transferred to T-GAN. Moreover, T-GAN can learn, directly from measurement, the features of PDPs that are not captured by the 3GPP method, which further improves the performance of T-GAN in modeling the distribution of PDPs.

Fig. 4. Measurement layout in the indoor corridor scenario [14] (Tx and Rx 1-21 positions among wooden, glass, and concrete walls and metal pillars).
III. EXPERIMENT AND PERFORMANCE EVALUATION

In this section, the experiment settings are elaborated. Moreover, the performance of the T-GAN is evaluated by comparing the generated distribution of PDPs with measurement.

A. Dataset and Setup

The dataset is collected from the measurement campaign in [14], which was conducted in an indoor corridor scenario at 306-321 GHz with a 400 ns maximum delay, as depicted in Fig. 4. From the measurement data, PDPs can be extracted to characterize the channel at the 21 receiver points. Since the frequency sampling interval is relatively small, namely 2.5 MHz, the measured PDPs are very long, comprising 6001 sample points, which would result in prohibitive computation and time consumption for training the GANs. To address this problem, we only use the measured channel transfer functions in the frequency band from 314 to 315 GHz, based on which the PDPs can be shortened to 401 sample points.

The PDPs of the 21 measured channels make up the measured dataset. In addition to the measured dataset, a dataset of simulated PDPs is generated by the 3GPP model with the statistics extracted from the measurement, and consists of 10000 channels. Compared to the measured dataset, the simulated dataset has a larger size, with the channel statistics embedded. Moreover, the PDPs in both datasets are normalized into the range of [0, 1] by the min-max normalization method.
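A sketch of this normalization (whether scaling is applied per PDP or over the whole dataset is not specified in the paper; per-PDP scaling is assumed here):

import numpy as np

def minmax_normalize(pdps):
    """Scale each PDP (one per row) into [0, 1], matching the Sigmoid output range of G."""
    lo = pdps.min(axis=1, keepdims=True)
    hi = pdps.max(axis=1, keepdims=True)
    return (pdps - lo) / (hi - lo + 1e-12)  # small epsilon guards constant rows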
The training procedure of the GAN network is explained in detail as follows. First, the input noise vector $z$ of size 100 is drawn from a multivariate normal distribution, which provides the degrees of freedom needed to transform into the desired distribution. The gradient penalty coefficient $\lambda$ in (4) is set to 10, which works well in the training process. Moreover, the stochastic gradient descent (SGD) optimizer is applied for the generator network, and the adaptive moment estimation (Adam) optimizer is chosen for the discriminator network. In addition, the learning rates of the two optimizers are both set to 0.0002 to stabilize the training.

All the experiments are run on a PC with an AMD Ryzen Threadripper 3990X @ 2.19 GHz and four Nvidia GeForce RTX 3090 Ti GPUs. The training of the GAN networks is carried out in the PyTorch framework.
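Putting these settings together with the networks G and D and the gradient_penalty function sketched in Sec. II, one training iteration might look as follows (a sketch; batches of normalized PDPs are assumed, and the critic loss is written in the standard WGAN-GP form rather than copied from (4)):

import torch

opt_G = torch.optim.SGD(G.parameters(), lr=2e-4)    # SGD for the generator
opt_D = torch.optim.Adam(D.parameters(), lr=2e-4)   # Adam for the discriminator

def train_step(x_real):
    z = torch.randn(x_real.size(0), 100)            # noise vector of size 100
    x_fake = G(z)
    # Discriminator (critic) update with the gradient-penalty objective.
    d_loss = (D(x_fake.detach()).mean() - D(x_real).mean()
              + gradient_penalty(D, x_real, x_fake.detach(), lam=10.0))
    opt_D.zero_grad(); d_loss.backward(); opt_D.step()
    # Generator update: make generated PDPs look real to the discriminator.
    g_loss = -D(G(z)).mean()
    opt_G.zero_grad(); g_loss.backward(); opt_G.step()
    return d_loss.item(), g_loss.item()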
Fig. 5. Loss of the generator and discriminator in the GAN network (loss versus training epoch; legend: G_loss and D_loss on the simulated dataset, G_loss and D_loss on the measured dataset, TG_loss and TD_loss of T-GAN on the measured dataset).
B. Convergence

The proposed GAN is first trained on the simulated dataset, and is then fine-tuned on the measured dataset with transfer learning to develop the T-GAN. The number of training epochs is set to 10000 for both the proposed GAN and T-GAN. An epoch is defined as a complete training cycle through the training dataset, in which the generator and discriminator are each trained once. To demonstrate the benefits of transfer learning, the GAN is also trained on the measured dataset without transfer learning, for comparison. The generator loss, denoted by G_loss, and the discriminator loss, denoted by D_loss, are shown in Fig. 5, where TG_loss and TD_loss are the corresponding losses of T-GAN. On the simulated dataset, the generator and discriminator clearly reach the equilibrium in the end. On the measured dataset, the loss of T-GAN stays close to the loss obtained on the simulated dataset, except for some small fluctuations, which are due to the small size of the measured dataset. By comparison, training is not stable for the GAN network without transfer learning: there are large fluctuations in the discriminator loss, and the absolute values of G_loss and D_loss are quite large compared to the losses on the simulated dataset. The comparison demonstrates the benefit of transfer learning in the training of the GAN network, which enables T-GAN to converge with a small training dataset. Moreover, it takes only 4000 epochs for T-GAN to converge, compared to 6000 epochs for the GAN trained on the simulated dataset. The training time of T-GAN on the measured dataset is also small, only 114 seconds, compared to 7 hours for the GAN trained on the simulated dataset. From these results, it is clear that the transfer learning technique improves the convergence rate of T-GAN and reduces the training overhead, thanks to the knowledge from the pre-trained model.
Fig. 6. Plot of PDPs generated by measurement, 3GPP, the proposed GAN and T-GAN: (a) samples of PDP; (b) average PDP (power [dB] versus delay [ns]).
Fig. 7. SSIM of PDP for 3GPP, the proposed GAN and T-GAN (cumulative probability function versus SSIM).
C. Power Delay Profile

In this experiment, sample PDPs from measurement, the 3GPP method, the proposed GAN, and T-GAN are compared in Fig. 6(a). The PDPs are clearly similar to each other, which proves that the proposed GAN and T-GAN can learn the features of PDPs. Moreover, it is observed that the measured PDP is more complex than the PDP of the 3GPP method, with more peaks and fluctuations in the temporal domain. This shows that the 3GPP method cannot fully capture the channel effects embedded in the PDP. Comparing the PDPs generated by the proposed GAN and T-GAN, the PDP generated by T-GAN is close to measurement, while the PDP generated by the proposed GAN resembles the 3GPP approach. This is reasonable, since T-GAN can capture the features of PDPs from measurement through transfer learning, while the proposed GAN can only learn the features of the PDPs simulated by the 3GPP method.

Fig. 8. Delay spread for 3GPP, the proposed GAN and T-GAN (cumulative probability function versus delay spread [ns], compared with measurement).
In addition, the average PDPs of these methods are plotted in Fig. 6(b). T-GAN shows good agreement with measurement, while 3GPP and the proposed GAN deviate considerably from measurement. The deviation can be measured by the root-mean-square error (RMSE), calculated as

    \mathrm{RMSE} = \sqrt{\frac{1}{N_\tau} \sum_{i} \big( P_m(i\Delta\tau) - P_g(i\Delta\tau) \big)^2},    (7)

where $N_\tau$ denotes the number of sampling points in a PDP, $i$ indexes the temporal sample points, and $\Delta\tau$ is the sampling interval. Moreover, $P_m(i\Delta\tau)$ and $P_g(i\Delta\tau)$ are the average power at the $i$-th sample point of the measured PDPs and the generated PDPs, respectively. The RMSE results for 3GPP, the proposed GAN, and T-GAN are 4.29 dB, 4.12 dB and -4.82 dB, respectively. T-GAN improves the RMSE by about 9 dB compared with the other methods, which demonstrates that T-GAN outperforms them in modeling the average power of PDPs. This is attributed to the powerful capability of GAN in modeling complex distributions, and to the benefit of transfer learning in better utilizing the small measurement dataset.
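In code, the RMSE of (7) between the measured and generated average PDPs (both in dB) could be computed as, e.g.:

import numpy as np

def pdp_rmse_db(P_meas, P_gen):
    """RMSE of (7) over the N_tau sample points of two average PDPs."""
    P_meas, P_gen = np.asarray(P_meas), np.asarray(P_gen)
    return float(np.sqrt(np.mean((P_meas - P_gen) ** 2)))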
Moreover, to measure the similarity quantitatively, the Structural Similarity Index Measure (SSIM) is introduced, which is widely applied to evaluate the quality and similarity of images. The range of SSIM is from 0 to 1, and the SSIM value is larger when the similarity is higher. The PDPs generated by the 3GPP method, the proposed GAN, and T-GAN are compared with measurement. The cumulative probability functions (CDFs) of SSIM for these methods are shown in Fig. 7. It can be observed that the proposed T-GAN achieves higher SSIM values than the other methods: more than 40% of the SSIM values exceed 0.6 for T-GAN, compared to only 20% for 3GPP and the proposed GAN. This further demonstrates the better performance of T-GAN in modeling the PDPs.
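As a sketch, an SSIM value between a generated and a measured PDP can be computed from the standard single-window definition below; the paper does not report its SSIM window settings, so this simplified global variant is an assumption:

import numpy as np

def ssim_global(x, y, c1=1e-4, c2=9e-4):
    """Global SSIM for two PDPs in [0, 1] (C1, C2 from K1=0.01, K2=0.03, L=1)."""
    mx, my = x.mean(), y.mean()
    cov = ((x - mx) * (y - my)).mean()
    return float(((2 * mx * my + c1) * (2 * cov + c2))
                 / ((mx ** 2 + my ** 2 + c1) * (x.var() + y.var() + c2)))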
D. Delay Spread

The delay spread characterizes the power dispersion of multi-path components in the temporal domain, and can be calculated as the second central moment of the PDP, as

    \bar{\tau} = \frac{\sum_{i=0}^{N_\tau} i\Delta\tau \, P(i\Delta\tau) \, \Delta\tau}{\sum_{i=0}^{N_\tau} P(i\Delta\tau) \, \Delta\tau}, \qquad
    \tau_{\mathrm{rms}} = \sqrt{\frac{\sum_{i=0}^{N_\tau} (i\Delta\tau - \bar{\tau})^2 \, P(i\Delta\tau) \, \Delta\tau}{\sum_{i=0}^{N_\tau} P(i\Delta\tau) \, \Delta\tau}},    (8)

where $\bar{\tau}$ denotes the mean delay weighted by the power, $\tau_{\mathrm{rms}}$ refers to the root-mean-square (RMS) delay spread, and $P(i\Delta\tau)$ is the power at the $i$-th sample point of the PDP.
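A direct NumPy transcription of (8) (assuming P holds the linear-power PDP on a uniform delay grid; the common Δτ factor cancels between numerator and denominator):

import numpy as np

def rms_delay_spread(P, dtau):
    """Mean delay and RMS delay spread of a PDP, per (8)."""
    tau = np.arange(len(P)) * dtau
    tau_bar = np.sum(tau * P) / np.sum(P)
    return tau_bar, float(np.sqrt(np.sum((tau - tau_bar) ** 2 * P) / np.sum(P)))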
The CDFs of the delay spread for measurement, 3GPP, the proposed GAN, and T-GAN are depicted in Fig. 8. It can be observed that the CDFs of the delay spread for 3GPP, the proposed GAN, and T-GAN all match the measurement well.

IV. CONCLUSION

In this paper, we have proposed a T-GAN based THz channel modeling method, which captures the distribution of PDPs of the THz channel with the designed GAN. Moreover, transfer learning is exploited in T-GAN to reduce the size requirement on the training dataset and to enhance the performance of the GAN, by transferring the knowledge stored in the GAN pre-trained on the simulated dataset to the target T-GAN trained on limited measurement. Finally, we validate the performance of T-GAN with measurement: T-GAN generates PDPs that are in good agreement with measurement. Compared with conventional methods, T-GAN shows better performance in modeling the distribution of PDPs, with a 9 dB improvement in RMSE and higher SSIM; more than 40% of the SSIM values exceed 0.6 for T-GAN, compared to only 20% for 3GPP and the proposed GAN.
REFERENCES

[1] I. F. Akyildiz, C. Han, Z. Hu, S. Nie, and J. M. Jornet, "Terahertz band communication: An old problem revisited and research directions for the next decade (invited paper)," IEEE Trans. Commun., vol. 70, no. 6, pp. 4250-4285, 2022.
[2] Z. Chen et al., "Terahertz wireless communications for 2030 and beyond: A cutting-edge frontier," IEEE Commun. Mag., vol. 59, no. 11, pp. 66-72, 2021.
[3] C. Han, Y. Wu, Z. Chen, Y. Chen, and G. Wang, "THz ISAC: A physical-layer perspective of terahertz integrated sensing and communication," arXiv preprint arXiv:2209.03145, 2022.
[4] P. Petrus, J. Reed, and T. Rappaport, "Geometrical-based statistical macrocell channel model for mobile environments," IEEE Trans. Commun., vol. 50, no. 3, pp. 495-502, 2002.
[5] Y. Chen, L. Yan, and C. Han, "Hybrid spherical- and planar-wave modeling and DCNN-powered estimation of terahertz ultra-massive MIMO channels," IEEE Trans. Commun., vol. 69, no. 10, pp. 7063-7076, 2021.
[6] Y. Chen, L. Yan, C. Han, and M. Tao, "Millidegree-level direction-of-arrival estimation and tracking for terahertz ultra-massive MIMO systems," IEEE Trans. Wirel. Commun., vol. 21, no. 2, pp. 869-883, 2022.
[7] T. Wang, C.-K. Wen, S. Jin, and G. Y. Li, "Deep learning-based CSI feedback approach for time-varying massive MIMO channels," IEEE Wirel. Commun. Lett., vol. 8, no. 2, pp. 416-419, 2019.
[8] T. J. O'Shea, T. Roy, and N. West, "Approximating the void: Learning stochastic channel models from observation with variational generative adversarial networks," in Proc. Int. Conf. Comput., Netw. Commun., 2019, pp. 681-686.
[9] H. Xiao, W. Tian, W. Liu, and J. Shen, "ChannelGAN: Deep learning-based channel modeling and generating," IEEE Wirel. Commun. Lett., vol. 11, no. 3, pp. 650-654, 2022.
[10] T. Orekondy, A. Behboodi, and J. B. Soriaga, "MIMO-GAN: Generative MIMO channel modeling," in Proc. IEEE Int. Conf. Commun., 2022, pp. 5322-5328.
[11] Y. Wei, M.-M. Zhao, and M.-J. Zhao, "Channel distribution learning: Model-driven GAN-based channel modeling for IRS-aided wireless communication," IEEE Trans. Commun., vol. 70, no. 7, pp. 4482-4497, 2022.
[12] N. V. Huynh and G. Y. Li, "Transfer learning for signal detection in wireless networks," IEEE Wirel. Commun. Lett., pp. 1-1, 2022.
[13] Y. Wang, C. Wu, L. Herranz, J. van de Weijer, A. Gonzalez-Garcia, and B. Raducanu, "Transferring GANs: Generating images from limited data," in Proc. Eur. Conf. Comput. Vis. (ECCV), 2018.
[14] Y. Li, Y. Wang, Y. Chen, Z. Yu, and C. Han, "Channel measurement and analysis in an indoor corridor scenario at 300 GHz," in Proc. IEEE Int. Conf. Commun., 2022, pp. 2888-2893.
[15] I. Gulrajani, F. Ahmed, M. Arjovsky, V. Dumoulin, and A. Courville, "Improved training of Wasserstein GANs," in Proc. Int. Conf. Neural Inf. Process. Syst., 2017, pp. 5769-5779.
[16] Study on Channel Model for Frequencies From 0.5 to 100 GHz (Release 15), document TR 38.901, 3GPP, 2018.
2tAzT4oBgHgl3EQfDvpk/content/tmp_files/load_file.txt
ADDED
@@ -0,0 +1,405 @@
Transfer Generative Adversarial Networks (T-GAN)-Based Terahertz Channel Modeling

Zhengdong Hu, Yuanbo Li, and Chong Han
Terahertz Wireless Communications (TWC) Laboratory, Shanghai Jiao Tong University, China.
Email: {huzhengdong, yuanbo.li, chong.han}@sjtu.edu.cn

Abstract—Terahertz (THz) communications are envisioned as a promising technology for 6G and beyond wireless systems, providing ultra-broad bandwidth and thus Terabit-per-second (Tbps) data rates. However, as the foundation of designing THz communications, channel modeling and characterization are fundamental to scrutinize the potential of the new spectrum. Relying on physical measurements, traditional statistical channel modeling methods suffer from low accuracy due to their assumed distributions and empirical parameters. Moreover, it is time-consuming and expensive to acquire extensive channel measurements in the THz band. In this paper, a transfer generative adversarial network (T-GAN) based modeling method is proposed for the THz band, which exploits the advantage of GANs in modeling complex distributions, and the benefit of transfer learning in transferring knowledge from a source task to improve generalization on a target task with limited training data. Specifically, the proposed GAN is first pre-trained on a simulated dataset generated by the standard channel model from the 3rd Generation Partnership Project (3GPP). Then, by transferring the knowledge and fine-tuning the pre-trained GAN, the T-GAN is developed on a small THz measured dataset. Experimental results reveal that the distribution of PDPs generated by the proposed T-GAN method shows good agreement with measurement. Moreover, T-GAN achieves good performance in channel modeling, with 9 dB improved root-mean-square error (RMSE) and higher Structural Similarity Index Measure (SSIM), compared with the traditional 3GPP method.
I. INTRODUCTION

With the exponential growth of the number of interconnected devices, the sixth generation (6G) is expected to achieve intelligent connections of everything, anywhere, anytime [1], which demands Tbit/s wireless data rates. To fulfill this demand, Terahertz (THz) communications gain increasing attention as a vital technology of 6G systems, thanks to the ultra-broad bandwidth ranging from tens of GHz to hundreds of GHz [2]. The THz band is promising to address the spectrum scarcity and capacity limitations of current wireless systems, and to realize long-awaited applications extending from wireless cognition and localization/positioning to integrated sensing and communication [3].
To design reliable THz wireless systems, one fundamental challenge lies in developing an accurate channel model to portray the propagation phenomena. Due to the high frequencies, new characteristics occur in the THz band, such as frequency-selective absorption loss and rough-surface scattering, and THz channel modeling is required to capture them. However, traditional statistical channel modeling methods suffer from low accuracy with their assumed distributions and empirical parameters. For example, a geometry-based stochastic channel model (GSCM) assumes that the positions of scatterers follow certain statistical distributions, such as a uniform distribution within a circle around the transmitters and receivers [4]. However, the positions of scatterers are hard to characterize by such distributions, making the GSCM inaccurate in the THz band. Moreover, it is time-consuming and costly to acquire extensive channel measurements for THz channel modeling. To this end, an accurate channel modeling method that works with limited measurement data is needed for the THz band.
Recently, deep learning (DL) has become popular and widely applied in wireless communications, for example in channel estimation [5], [6] and channel state information (CSI) feedback [7]. Among DL methods, the generative adversarial network (GAN) has the advantage of modeling complex distributions accurately without any statistical assumptions, and can therefore be used to develop channel models. The authors in [8] train a GAN to approximate the probability density functions (PDFs) of the stochastic channel response. In [9], a GAN is applied to generate synthetic channel samples close to the distribution of real channel samples. The researchers in [10] model the channel with a GAN through channel input-output measurements. In [11], a model-driven GAN-based channel modeling method is developed for intelligent reflecting surface (IRS) aided communication systems. These methods achieve good performance in modeling the channel and show high consistency between the target and generated channel distributions. However, GAN-based channel modeling has not been exploited in the THz band, and it is a challenge to train a GAN for channel modeling with the scarce THz channel measurement data.
In this paper, a transfer GAN (T-GAN)-based THz channel modeling method is proposed, which learns the distribution of the power delay profile (PDP) of the THz channel. Moreover, to tackle the challenge of limited channel measurement in the THz band, the transfer learning technique is introduced in T-GAN, which reduces the required size of the training dataset and enhances the performance of channel modeling by transferring the knowledge stored in a pre-trained model to a new model [12], [13]. Furthermore, the performance of T-GAN in modeling the channel distribution is validated against real measurements [14]. The contributions of this paper are listed as follows.
• We propose a T-GAN based THz channel modeling method, in which a GAN is designed to capture the distribution of PDPs of the THz channel by training on a dataset of PDP samples.
• To tackle the challenge of limited measurement data for THz channel modeling, transfer learning is further exploited by T-GAN, which reduces the required size of the training dataset and enhances the performance of the GAN by transferring the knowledge stored in a pre-trained model to a new model.
The rest of the paper is organized as follows. Sec. II details the proposed T-GAN based channel modeling method. Sec. III demonstrates the performance of the proposed T-GAN method. The paper is concluded in Sec. IV.
Notation: a is a scalar. a denotes a vector. A represents a matrix. E{·} describes the expectation. ∇ denotes the gradient operation. ∥·∥ represents the L2 norm. I_N defines an N-dimensional identity matrix. N denotes the normal distribution.
II. TRANSFER GAN (T-GAN) BASED CHANNEL MODELING

In this section, the channel modeling problem is first formulated as a channel distribution learning problem. Then, the proposed GAN in the T-GAN method is elaborated. Finally, the T-GAN is presented.

A. Problem Formulation

The channel impulse response (CIR) can be represented as
$$h(\tau) = \sum_{l=0}^{L-1} \alpha_l e^{j\phi_l} \delta(\tau - \tau_l), \qquad (1)$$
where $\tau_l$ denotes the delay of the $l$-th multi-path component (MPC), $L$ denotes the number of MPCs, $\alpha_l$ refers to the path gain and $\phi_l$ represents the phase of the corresponding MPC.
To characterize the channel, the PDP is an important feature, which indicates the dispersion of power over time delay, i.e., the received power with respect to the delay in a multi-path channel. It can be extracted from the channel impulse response by
$$P(\tau) = |h(\tau)|^2. \qquad (2)$$
The channel modeling problem is then cast as learning the distribution of PDPs, denoted by $p_r$, which is difficult to represent analytically. Instead, the distribution $p_r$ can be captured by generating fake PDP samples with distribution $p_g$, such that the generated distribution $p_g$ matches the actual distribution $p_r$.
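To make the relation between (1) and (2) concrete, the following sketch builds a synthetic multi-path CIR on a delay grid and extracts its PDP; the path count, gains, phases and delays are illustrative assumptions, not values from the measurement campaign.

```python
import numpy as np

# Minimal sketch: synthesize a CIR per Eq. (1), extract its PDP per Eq. (2).
rng = np.random.default_rng(0)
n_taps, dtau = 401, 1.0                 # 401 delay bins, 1 ns spacing (assumed grid)

L = 5                                   # number of MPCs (assumed)
alpha = rng.uniform(0.1, 1.0, L)        # path gains alpha_l
phi = rng.uniform(0, 2 * np.pi, L)      # path phases phi_l
tau_idx = rng.integers(0, n_taps, L)    # path delays tau_l on the grid

h = np.zeros(n_taps, dtype=complex)     # h(tau) = sum_l alpha_l e^{j phi_l} delta(tau - tau_l)
np.add.at(h, tau_idx, alpha * np.exp(1j * phi))

pdp = np.abs(h) ** 2                    # P(tau) = |h(tau)|^2, Eq. (2)
pdp_db = 10 * np.log10(pdp + 1e-12)     # power in dB for inspection
```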
B. Proposed GAN

The GAN can be utilized to learn the distribution of PDPs, denoted by $p_r$, with the framework depicted in Fig. 1. The GAN consists of two sub-networks, namely a generator and a discriminator. The generator aims at generating fake samples $G(z)$ to fool the discriminator, where $z$ is a noise sample, by mapping the input noise distribution $p_z(z)$ to the generated distribution $p_g = p(G(z))$. The discriminator tries to distinguish real samples $x$ from $p_r$ and fake samples $G(z)$ from $p_g$, and its outputs $D(x)$ and $D(G(z))$ can be treated as the probability of a sample being real.

Fig. 1. Framework of GAN.

The two networks are trained in an adversarial manner, which can be considered a two-player zero-sum minimax game. Specifically, the training objective can be represented by
$$\min_G \max_D \; \mathbb{E}_{x\sim p_r}[\log D(x)] + \mathbb{E}_{z\sim p_z}[\log(1 - D(G(z)))], \qquad (3)$$
where the generator minimizes the probability $1 - D(G(z))$ that a generated sample is detected as fake by the discriminator, while the discriminator maximizes this probability. Therefore, the generator and discriminator compete against each other with opposite objectives during training. Through adversarial training, a Nash equilibrium can be reached, at which neither the generator nor the discriminator can improve its objective by changing only its own network. Moreover, the global optimum of the training objective is achieved at this equilibrium when $p_g = p_r$.
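As a quick illustration of how objective (3) splits into two alternating losses in practice, the sketch below computes the discriminator and generator losses with binary cross-entropy. It assumes a discriminator with sigmoid output, as in the original GAN formulation (the WGAN-GP variant adopted below uses a linear output instead); `G` and `D` are placeholder modules.

```python
import torch
import torch.nn.functional as F

# Minimal sketch of the alternating losses implied by Eq. (3).
# G and D are assumed nn.Module generator/discriminator placeholders.
def discriminator_loss(D, G, x_real, z):
    d_real = D(x_real)                    # D(x), probability of "real"
    d_fake = D(G(z).detach())             # D(G(z)), generator held fixed
    # Maximizing log D(x) + log(1 - D(G(z))) == minimizing this BCE sum.
    return (F.binary_cross_entropy(d_real, torch.ones_like(d_real))
            + F.binary_cross_entropy(d_fake, torch.zeros_like(d_fake)))

def generator_loss(D, G, z):
    d_fake = D(G(z))
    # Non-saturating form: maximize log D(G(z)) rather than minimize
    # log(1 - D(G(z))); a common practical substitute, not from the paper.
    return F.binary_cross_entropy(d_fake, torch.ones_like(d_fake))
```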
However, training with the objective function in (3) is unstable, since the objective is potentially not continuous with respect to the generator's parameters [15]. Therefore, an improved version of GAN, namely the Wasserstein GAN with gradient penalty (WGAN-GP) [15], is adopted. The modified objective function is expressed as
$$\min_G \max_D \; \mathbb{E}_{x\sim p_r}[D(x)] + \mathbb{E}_{z\sim p_z}[1 - D(G(z))] + \lambda \mathbb{E}_{\tilde{x}}[(\lVert \nabla_{\tilde{x}} D(\tilde{x}) \rVert - 1)^2], \qquad (4)$$
where the last term is the gradient penalty enforcing the Lipschitz constraint that the gradient of the network is upper-bounded by a maximum value, the symbol $\tilde{x}$ is a point sampled uniformly between $x$ and $G(z)$, and the parameter $\lambda$ is the penalty coefficient.
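A minimal sketch of the gradient penalty term in (4) is given below, assuming `D` is the critic and the samples are batched 1-D PDP vectors; it follows the standard WGAN-GP recipe of [15] rather than any implementation detail disclosed by the authors.

```python
import torch

def gradient_penalty(D, x_real, x_fake):
    """Penalty term E[(||grad D(x_hat)|| - 1)^2] from Eq. (4)."""
    batch = x_real.size(0)
    eps = torch.rand(batch, 1, device=x_real.device)   # uniform mixing weights
    x_hat = (eps * x_real + (1 - eps) * x_fake).requires_grad_(True)
    d_hat = D(x_hat)
    grads = torch.autograd.grad(outputs=d_hat, inputs=x_hat,
                                grad_outputs=torch.ones_like(d_hat),
                                create_graph=True)[0]
    return ((grads.norm(2, dim=1) - 1) ** 2).mean()

# Typical use inside the critic step (lambda = 10, as stated in Sec. III-A):
# d_loss = d_fake.mean() - d_real.mean() + 10.0 * gradient_penalty(D, x_real, x_fake)
```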
After introducing the framework of GAN, the detailed architecture of the proposed GAN network is presented. The structures of the generator G and discriminator D are depicted in Fig. 2, where the number in brackets denotes the dimension. The input to the generator is a noise vector $z$ of dimension $n_z = 100$, sampled from the probability density function $\mathcal{N}(0, \sigma^2 I_{n_z})$. The generator consists of five dense layers, with 128, 128, 128, 128 and 401 neurons, respectively; the size of the output layer equals the size of a PDP.

Fig. 2. Structure of generator and discriminator.

The activation function of the first four dense layers is the LeakyReLU function, which speeds up convergence and avoids the vanishing-gradient problem. The LeakyReLU function is expressed as
$$f(x) = \begin{cases} x, & \text{if } x \geq 0 \\ \alpha x, & \text{if } x < 0 \end{cases} \qquad (5)$$
where $\alpha$ is the slope coefficient for negative neuron values $x$. In addition to the LeakyReLU function, a Sigmoid function is utilized in the last layer, which maps the output to the range $[0, 1]$. The Sigmoid function is defined as
$$f(x) = \frac{1}{1 + e^{-x}}. \qquad (6)$$
After passing through the dense layers and activation functions of the generator, the input noise vectors are transformed into generated samples, which are then passed to the discriminator together with real samples.
The discriminator is designed to distinguish between generated samples and real samples. The numbers of neurons in its five dense layers are 512, 256, 128, 64 and 1, respectively. The activation function chosen for the first four layers is again the LeakyReLU function, while the output layer uses a linear activation, as dictated by the WGAN-GP objective in (4).
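The layer sizes above translate directly into the following PyTorch sketch; the LeakyReLU slope of 0.2 is an assumed value, since the paper does not state it.

```python
import torch.nn as nn

# Sketch of the generator and discriminator of Sec. II-B.
# Layer widths follow the text; the LeakyReLU slope (0.2) is assumed.
generator = nn.Sequential(
    nn.Linear(100, 128), nn.LeakyReLU(0.2),
    nn.Linear(128, 128), nn.LeakyReLU(0.2),
    nn.Linear(128, 128), nn.LeakyReLU(0.2),
    nn.Linear(128, 128), nn.LeakyReLU(0.2),
    nn.Linear(128, 401), nn.Sigmoid(),   # output size = PDP length, mapped to [0, 1]
)

discriminator = nn.Sequential(
    nn.Linear(401, 512), nn.LeakyReLU(0.2),
    nn.Linear(512, 256), nn.LeakyReLU(0.2),
    nn.Linear(256, 128), nn.LeakyReLU(0.2),
    nn.Linear(128, 64),  nn.LeakyReLU(0.2),
    nn.Linear(64, 1),                    # linear output, per the WGAN-GP objective (4)
)
```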
C. Proposed T-GAN

The framework of the proposed T-GAN is depicted in Fig. 3, in which transfer learning is conducted between the measurement and the 3GPP TR 38.901 model [16]. The measured PDPs are the PDPs extracted from the measurement, with a small size, while the simulated PDPs are generated by the 3GPP model implemented with the statistics extracted from the measurement. The proposed GAN and T-GAN, sharing the same network structure, are trained on the simulated PDPs and the measured PDPs, respectively, to capture the distribution of PDPs. Since the set of measured PDPs is quite small for training the T-GAN, which can cause convergence difficulties or over-fitting, transfer learning is exploited to tackle these problems.

Fig. 3. Framework for T-GAN.
To describe transfer learning formally, a domain denoted by $\mathcal{D}$ consists of a feature space $\mathcal{X}$ and a marginal probability distribution $P(X)$ defined on $X = \{x_1, x_2, \cdots, x_N\} \in \mathcal{X}$, where $N$ is the number of feature vectors in $X$. As depicted in Fig. 3, the target domain $\mathcal{D}_t$ and source domain $\mathcal{D}_s$ are defined on the measurement and the 3GPP model, respectively. The feature spaces of the two domains are both constructed from PDPs, with different marginal probability distributions defined on the measured PDPs $X_t$ and the simulated PDPs $X_s$. Moreover, given a domain $\mathcal{D}(\mathcal{X}, P(X))$, a task denoted by $\mathcal{T}$ is defined by a label space $\mathcal{L}$ and a predictive function $f(\cdot)$, where the predictive function is learned from pairs $(x_n, l_n)$ with $x_n \in X$ and $l_n \in \mathcal{L}$. In the target domain $\mathcal{D}_t$ and source domain $\mathcal{D}_s$, the tasks are the same, namely to capture the distribution of PDPs, and the label space is $\mathcal{L} = \{0, 1\}$, indicating whether a PDP sample is generated by the proposed GAN or drawn from the training dataset. The T-GAN and GAN serve as the predictive functions $f_t$ and $f_s$. Transfer learning then aims at learning the function $f_t$ in the target domain $\mathcal{D}_t$ with the knowledge of $\mathcal{T}_s$ in the source domain $\mathcal{D}_s$, i.e., transferring the knowledge stored in the GAN trained on simulated PDPs to the T-GAN trained on measured PDPs.
The method of fine-tuning [13] is adopted for the transfer learning. The T-GAN is initialized with the weights of the GAN trained on the simulated PDPs, and is then fine-tuned on the small set of measured PDPs. It is worth noting that both the generator and the discriminator are transferred, which yields better sample quality and faster convergence than transferring only the generator or only the discriminator [13]. With transfer learning, the performance of T-GAN is largely enhanced. Specifically, the channel statistics extracted for the 3GPP method are captured by the proposed GAN trained on simulated PDPs, and are then transferred to T-GAN. Moreover, T-GAN can learn features of the PDPs that are not captured by the 3GPP method directly from the measurement, which further improves its performance in modeling the distribution of PDPs.
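In code, the fine-tuning step amounts to copying the pre-trained weights into the new networks before continuing training on the measured PDPs. A minimal sketch, reusing the `generator`/`discriminator` modules sketched earlier; the checkpoint file names are hypothetical:

```python
import torch

# Sketch of the fine-tuning transfer in Sec. II-C: both networks are
# initialized from the GAN pre-trained on simulated PDPs, then trained
# further on the measured PDPs. Checkpoint paths are hypothetical.
generator.load_state_dict(torch.load("gan_pretrained_generator.pt"))
discriminator.load_state_dict(torch.load("gan_pretrained_discriminator.pt"))

# ...then run the same WGAN-GP training loop on the 21 measured PDPs,
# which acts as fine-tuning rather than training from scratch.
```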
Fig. 4. Measurement layout in the indoor corridor scenario [14].
III. EXPERIMENT AND PERFORMANCE EVALUATION

In this section, the experiment settings are elaborated, and the performance of the T-GAN is evaluated by comparing the generated distribution of PDPs with measurement.

A. Dataset and Setup

The dataset is collected from the measurement campaign in [14], which was conducted in an indoor corridor scenario at 306–321 GHz with a 400 ns maximum delay, as depicted in Fig. 4. From the measurement data, PDPs are extracted to characterize the channel at the 21 receiver points. Since the frequency sampling interval is relatively small, 2.5 MHz, the measured PDPs are very long, with 6001 sample points, which would incur extraordinary computation and time consumption to train the GANs. To address this, only the measured channel transfer functions in the band from 314 to 315 GHz are used, which shortens the PDPs to 401 sample points. The PDPs of the 21 measured channels make up the measured dataset. In addition, a dataset of simulated PDPs is generated by the 3GPP model with the statistics extracted from the measurement, consisting of 10000 channels. Compared to the measured dataset, the simulated dataset has a much larger size with the channel statistics embedded. Moreover, the PDPs in both datasets are normalized to the range [0, 1] by min-max normalization.
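The min-max normalization applied to both datasets is the usual rescaling into [0, 1]; a one-function sketch:

```python
import numpy as np

def min_max_normalize(pdps: np.ndarray) -> np.ndarray:
    """Rescale a (num_samples, 401) array of PDPs into [0, 1].

    Sketch of the normalization step in Sec. III-A. Whether the min/max
    are taken per sample or over the whole dataset is not stated in the
    paper; a global min/max over the dataset is assumed here.
    """
    lo, hi = pdps.min(), pdps.max()
    return (pdps - lo) / (hi - lo)
```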
The training procedure of the GAN network is as follows. First, the input noise vector z of size 100 is drawn from a multivariate normal distribution, which provides the capacity to transform into the desired distribution. The gradient penalty parameter λ in (4) is set to 10, which works well in the training process. Moreover, the stochastic gradient descent (SGD) optimizer is applied for the generator network, and the adaptive moment estimation (Adam) optimizer is chosen for the discriminator network; the learning rates of both optimizers are set to 0.0002 to stabilize the training. All experiments are implemented on a PC with an AMD Ryzen Threadripper 3990X @ 2.19 GHz and four Nvidia GeForce RTX 3090 Ti GPUs, and the training of the GAN network is carried out in the PyTorch framework.
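The stated hyper-parameters map onto PyTorch as below, reusing the modules sketched earlier; Adam's betas are left at their defaults since the paper does not specify them.

```python
import torch

# Optimizer setup from Sec. III-A: SGD for the generator, Adam for the
# discriminator, both with lr = 2e-4 (lambda = 10 enters the critic loss).
opt_g = torch.optim.SGD(generator.parameters(), lr=2e-4)
opt_d = torch.optim.Adam(discriminator.parameters(), lr=2e-4)  # default betas assumed

z = torch.randn(64, 100)   # batch of noise vectors z ~ N(0, I), n_z = 100
fake_pdps = generator(z)   # forward pass producing 64 candidate PDPs of length 401
```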
Fig. 5. Loss of the generator and discriminator in the GAN network over 10000 epochs (G_loss/D_loss on the simulated and measured datasets, and TG_loss/TD_loss for T-GAN on the measured dataset).
B. Convergence

The proposed GAN is first trained on the simulated dataset and then fine-tuned on the measured dataset with transfer learning to develop the T-GAN. The number of epochs for training the proposed GAN and the T-GAN is set to 10000 in both cases, where an epoch is defined as a complete training cycle through the training dataset in which the generator and discriminator are each trained once. To demonstrate the benefits of transfer learning, the GAN is also trained on the measured dataset without transfer learning for comparison. The generator loss, denoted G_loss, and the discriminator loss, denoted D_loss, are shown in Fig. 5, where TG_loss and TD_loss are the corresponding losses of T-GAN. On the simulated dataset, the generator and discriminator clearly reach equilibrium in the end. On the measured dataset, the loss of T-GAN is close to that on the simulated dataset except for some small fluctuations, which are due to the small size of the measured dataset. By comparison, training is not stable for the GAN network without transfer learning: there are large fluctuations in the discriminator loss, and the absolute values of G_loss and D_loss are large compared to the losses on the simulated dataset. This comparison demonstrates the benefit of transfer learning, which enables T-GAN to converge with a small training dataset. Moreover, it takes only 4000 epochs for T-GAN to converge, compared to 6000 epochs for the GAN trained on the simulated dataset, and the training time of T-GAN on the measured dataset is only 114 seconds, compared to 7 hours for the GAN on the simulated dataset. These results show that transfer learning improves the convergence rate of T-GAN and reduces the training overhead thanks to the knowledge from the pre-trained model.
Fig. 6. Plot of PDPs generated by measurement, 3GPP, the proposed GAN and T-GAN: (a) samples of PDP; (b) average PDP.
Fig. 7. SSIM of PDP for 3GPP, the proposed GAN and T-GAN (cumulative probability function of SSIM).
C. Power Delay Profile

In the experiment, sample PDPs from measurement, the 3GPP method, the proposed GAN and T-GAN are compared in Fig. 6(a). The PDPs are similar to each other, which shows that the proposed GAN and T-GAN can learn the features of PDPs. Moreover, the measured PDP is more complex than that of the 3GPP method, with more peaks and fluctuations in the temporal domain, which shows that 3GPP cannot fully capture the channel effects embedded in the PDP. Comparing the PDPs generated by the proposed GAN and T-GAN, the PDP generated by T-GAN is close to measurement, while the PDP generated by the proposed GAN resembles the 3GPP approach.
This is reasonable, since T-GAN can capture the features of PDPs directly from measurement through transfer learning, while the proposed GAN can only learn the features of the PDPs simulated by the 3GPP method.

Fig. 8. Delay spread for measurement, 3GPP, the proposed GAN and T-GAN (cumulative probability function).

In addition, the average PDPs of these methods are plotted in Fig. 6(b). T-GAN shows good agreement with measurement, while 3GPP and the proposed GAN deviate from measurement considerably.
The deviations can be measured by the root-mean-square error (RMSE), calculated as
$$\mathrm{RMSE} = \sqrt{\frac{1}{N_\tau} \sum_i \left(P_m(i\Delta\tau) - P_g(i\Delta\tau)\right)^2}, \qquad (7)$$
where $i$ indexes the temporal sample points of the PDPs, $N_\tau$ is the number of sampling points, and $\Delta\tau$ is the sampling interval; $P_m(i\Delta\tau)$ and $P_g(i\Delta\tau)$ are the average powers at the $i$-th sample point of the measured and generated PDPs, respectively. The RMSE values for 3GPP, the proposed GAN and T-GAN are 4.29 dB, 4.12 dB and −4.82 dB, respectively. T-GAN improves the RMSE by about 9 dB compared with the other methods, which demonstrates that T-GAN outperforms them in modeling the average power of PDPs. This is attributed to the powerful capability of GANs in modeling complex distributions, and to the benefit of transfer learning in better exploiting the small measurement dataset.
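The RMSE of (7) between two average PDPs is a one-liner; a sketch, noting that the paper does not fully specify how its dB-valued RMSEs are obtained, so computing (7) directly on dB-valued PDPs is one plausible reading assumed here:

```python
import numpy as np

def pdp_rmse_db(pdp_meas_db: np.ndarray, pdp_gen_db: np.ndarray) -> float:
    """RMSE of Eq. (7) between measured and generated average PDPs.

    Both inputs are assumed to be length-401 average PDPs already
    expressed in dB; this convention is an assumption, not stated
    explicitly in the paper.
    """
    return float(np.sqrt(np.mean((pdp_meas_db - pdp_gen_db) ** 2)))
```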
Moreover, to measure the similarity quantitatively, the Structural Similarity Index Measure (SSIM) is introduced, which is widely applied to evaluate the quality and similarity of images. SSIM ranges from 0 to 1, with larger values indicating higher similarity. The PDPs generated by the 3GPP method, the proposed GAN and T-GAN are compared with measurement, and the cumulative distribution functions (CDFs) of SSIM for these methods are shown in Fig. 7. The proposed T-GAN achieves higher SSIM values than the other methods: more than 40% of SSIM values exceed 0.6 for T-GAN, compared to only 20% for 3GPP and the proposed GAN. This further demonstrates the better performance of T-GAN in modeling the PDPs.
236 |
+
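As a concrete illustration of how these two metrics can be computed, the sketch below treats a pair of PDPs as NumPy arrays. It is a minimal reading of the definitions above rather than the authors' evaluation code: the mapping of the linear RMSE to dB via 10 log10 is an assumption consistent with the negative value reported for T-GAN, and skimage's structural_similarity is one standard SSIM implementation.

```python
# Minimal metric sketch; array shapes and the dB mapping are assumptions.
import numpy as np
from skimage.metrics import structural_similarity

def rmse_db(pdp_measured, pdp_generated):
    """RMSE between measured and generated PDPs, expressed in dB."""
    rmse = np.sqrt(np.mean((pdp_measured - pdp_generated) ** 2))
    return 10.0 * np.log10(rmse)  # a linear RMSE < 1 gives a negative dB value

def ssim_score(pdp_measured, pdp_generated):
    """SSIM between two PDPs treated as single-channel images."""
    data_range = max(pdp_measured.max(), pdp_generated.max()) - \
                 min(pdp_measured.min(), pdp_generated.min())
    return structural_similarity(pdp_measured, pdp_generated,
                                 data_range=data_range)
```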
D. Delay Spread

Delay spread characterizes the power dispersion of multipath components in the temporal domain, and can be calculated as the second central moment of the PDPs by

$$\bar{\tau} = \frac{\sum_{i=0}^{N_\tau} i\Delta\tau\, P(i\Delta\tau)\,\Delta\tau}{\sum_{i=0}^{N_\tau} P(i\Delta\tau)\,\Delta\tau}, \qquad \tau_{\mathrm{rms}} = \sqrt{\frac{\sum_{i=0}^{N_\tau} (i\Delta\tau - \bar{\tau})^2\, P(i\Delta\tau)\,\Delta\tau}{\sum_{i=0}^{N_\tau} P(i\Delta\tau)\,\Delta\tau}}, \tag{8}$$

where $\bar{\tau}$ denotes the mean delay weighted by the power, $\tau_{\mathrm{rms}}$ refers to the root-mean-square (RMS) delay spread, and P(i∆τ) is the power in the i-th sample point of the PDP. The CDF plot of delay spread for measurement, 3GPP, the proposed GAN, and T-GAN is depicted in Fig. 8. It can be observed that the CDFs of delay spread for 3GPP, the proposed GAN, and T-GAN all match the measurement well.
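A direct Python transcription of Eq. (8) makes the power weighting explicit; the sample spacing delta_tau and the linear-power PDP array are assumed inputs.

```python
# Direct transcription of Eq. (8); pdp holds linear power values
# sampled every delta_tau seconds (both are assumed inputs).
import numpy as np

def delay_spread(pdp, delta_tau):
    """Return mean delay and RMS delay spread of a sampled PDP."""
    delays = np.arange(len(pdp)) * delta_tau
    total_power = np.sum(pdp) * delta_tau          # shared denominator
    tau_bar = np.sum(delays * pdp) * delta_tau / total_power
    tau_rms = np.sqrt(np.sum((delays - tau_bar) ** 2 * pdp)
                      * delta_tau / total_power)
    return tau_bar, tau_rms
```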
IV. CONCLUSION

In this paper, we proposed a T-GAN based THz channel modeling method, which captures the distribution of PDPs for the THz channel with the designed GAN. Moreover, transfer learning is exploited in T-GAN to reduce the required size of the training dataset and enhance the performance of the GAN, by transferring the knowledge stored in the GAN pre-trained on the simulated dataset to the target T-GAN trained on limited measurement. Finally, we validated the performance of T-GAN against measurement. T-GAN generates PDPs that show good agreement with measurement. Compared with conventional methods, T-GAN performs better in modeling the distribution of PDPs, with 9 dB improved RMSE and higher SSIM: more than 40% of SSIM values are above 0.6 for T-GAN, compared with only 20% for 3GPP and the proposed GAN.
REFERENCES

[1] I. F. Akyildiz, C. Han, Z. Hu, S. Nie, and J. M. Jornet, "Terahertz band communication: An old problem revisited and research directions for the next decade (invited paper)," IEEE Trans. Commun., vol. 70, no. 6, pp. 4250-4285, 2022.
[2] Z. Chen et al., "Terahertz wireless communications for 2030 and beyond: A cutting-edge frontier," IEEE Commun. Mag., vol. 59, no. 11, pp. 66-72, 2021.
[3] C. Han, Y. Wu, Z. Chen, Y. Chen, and G. Wang, "THz ISAC: A physical-layer perspective of terahertz integrated sensing and communication," arXiv preprint:2209.03145, 2022.
[4] P. Petrus, J. Reed, and T. Rappaport, "Geometrical-based statistical macrocell channel model for mobile environments," IEEE Trans. Commun., vol. 50, no. 3, pp. 495-502, 2002.
[5] Y. Chen, L. Yan, and C. Han, "Hybrid spherical- and planar-wave modeling and DCNN-powered estimation of terahertz ultra-massive MIMO channels," IEEE Trans. Commun., vol. 69, no. 10, pp. 7063-7076, 2021.
[6] Y. Chen, L. Yan, C. Han, and M. Tao, "Millidegree-level direction-of-arrival estimation and tracking for terahertz ultra-massive MIMO systems," IEEE Trans. Wirel. Commun., vol. 21, no. 2, pp. 869-883, 2022.
[7] T. Wang, C.-K. Wen, S. Jin, and G. Y. Li, "Deep learning-based CSI feedback approach for time-varying massive MIMO channels," IEEE Wirel. Commun. Lett., vol. 8, no. 2, pp. 416-419, 2019.
[8] T. J. O'Shea, T. Roy, and N. West, "Approximating the void: Learning stochastic channel models from observation with variational generative adversarial networks," in Proc. Int. Conf. Comput., Netw. Commun., 2019, pp. 681-686.
[9] H. Xiao, W. Tian, W. Liu, and J. Shen, "ChannelGAN: Deep learning-based channel modeling and generating," IEEE Wirel. Commun. Lett., vol. 11, no. 3, pp. 650-654, 2022.
[10] T. Orekondy, A. Behboodi, and J. B. Soriaga, "MIMO-GAN: Generative MIMO channel modeling," in Proc. IEEE Int. Conf. Commun., 2022, pp. 5322-5328.
[11] Y. Wei, M.-M. Zhao, and M.-J. Zhao, "Channel distribution learning: Model-driven GAN-based channel modeling for IRS-aided wireless communication," IEEE Trans. Commun., vol. 70, no. 7, pp. 4482-4497, 2022.
[12] N. V. Huynh and G. Y. Li, "Transfer learning for signal detection in wireless networks," IEEE Wirel. Commun. Lett., pp. 1-1, 2022.
[13] Y. Wang, C. Wu, L. Herranz, J. van de Weijer, A. Gonzalez-Garcia, and B. Raducanu, "Transferring GANs: Generating images from limited data," in Proc. ECCV, 2018.
[14] Y. Li, Y. Wang, Y. Chen, Z. Yu, and C. Han, "Channel measurement and analysis in an indoor corridor scenario at 300 GHz," in Proc. IEEE Int. Conf. Commun., 2022, pp. 2888-2893.
[15] I. Gulrajani, F. Ahmed, M. Arjovsky, V. Dumoulin, and A. Courville, "Improved training of Wasserstein GANs," in Proc. Int. Conf. Neural Inf. Process. Syst., 2017, pp. 5769-5779.
[16] Study on Channel Model for Frequencies From 0.5 to 100 GHz (Release 15), document TR 38.901, 3GPP, 2018.
2tE1T4oBgHgl3EQflgTe/content/2301.03287v1.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a73294f6d766dcc1c27105c28d2820cd67c5f9fda070e130b89f37c7cf0c5d0
+size 1495493
2tE1T4oBgHgl3EQflgTe/vector_store/index.faiss
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b71792098cdc558dcd56cfe7f8d1861931ff682dfc02000e053aa44d1df814e
+size 3866669
2tE1T4oBgHgl3EQflgTe/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a30a0d4868b0d08a647582eff8cc825a3ef9659c3d97ab4e9a24f6eaffad5b09
+size 157027
3NE3T4oBgHgl3EQfoQom/content/tmp_files/2301.04631v1.pdf.txt
ADDED
@@ -0,0 +1,1043 @@
Deep Residual Axial Networks

Nazmul Shahadat, Anthony S. Maida
University of Louisiana at Lafayette
Lafayette LA 70504, USA
nazmul.ruet@gmail.com, maida@louisiana.edu

Abstract

While residual networks (ResNets) demonstrate outstanding performance on computer vision tasks, their computational cost remains high. Here, we focus on reducing this cost by proposing a new network architecture, the axial ResNet, which replaces spatial 2D convolution operations with two consecutive 1D convolution operations. Convergence of very deep axial ResNets faces degradation problems which prevent the networks from performing efficiently. To mitigate this, we apply a residual connection to each 1D convolutional operation and propose our final novel architecture, residual axial networks (RANs). Extensive benchmark evaluation shows that RANs outperform ResNets while using about 49% fewer parameters on the CIFAR benchmarks, SVHN, and Tiny ImageNet image classification datasets. Moreover, our proposed RANs show significant improvement in validation performance compared with wide ResNets on the CIFAR benchmarks and with deep recursive residual networks on an image super-resolution dataset.
1. Introduction

Deep convolutional neural network (CNN) based architectures, specifically ResNets [11], have achieved significant success in image processing tasks, including classification [10, 11, 21], object detection [6, 22], and image super-resolution [17, 18, 28]. The performance of deep ResNets and wide ResNets has improved in recent years, but along with the increasing depth or width of ResNets, the computational cost of the networks also rises. Moreover, training these deeper or wider networks faces exploding or vanishing gradients and degradation problems. Different initialization, optimization, and normalization techniques [9, 10, 16, 25, 27, 30], skip connections [10], and transfer learning [5] have been used to mitigate these problems. Reducing the growing computational cost and trainable parameter count, however, remains largely unexplored, and it is the main purpose of this paper.

Deep and wide ResNets have gained popularity and impressive performance due to their simple but effective architectures [4, 8, 14, 31, 33], yet the computational cost of these deeper and wider ResNets has not been analyzed thoroughly. Deep ResNets can be viewed as ensembles of shallow networks [1] and represent functions for complex tasks more efficiently than shallow networks [2]. However, constructing deeper ResNets is not as simple as adding more residual layers: the design of deeper ResNets demands better optimization and initialization schemes, and proper use of identity connections. Deeper ResNets have had great success in image classification and object detection tasks [10, 11], but the computational cost increases linearly with the number of layers [12].

Wide ResNets use a shallow network with a wide (high channel count) architecture to attain better performance than deeper networks [4, 31, 33]. For example, [33] denoted their wide residual network as WRN-n-k, where n is the number of convolutional layers and k is the widening factor. They showed that their WRN-28-10, a wide ResNet with 28 convolutional layers and widening factor k = 10, outperforms the deep ResNet-1001 network (1001 layers). However, the computational cost is quadratic in the widening factor k.

This work revisits the designs of deep and wide ResNets to boost their performance further, reduce the above-mentioned high computational costs, and improve model inference speed. To achieve this, we propose our novel architecture, residual axial networks (RANs), which use axial operations, along the height or width axis, instead of spatial operations in the residual block. Here, we split the 2D spatial (3 × 3) convolution operation into two consecutive 1D convolution operations, mapped to the height axis (3 × 1) and the width axis (1 × 3). As axial 1D convolution operations propagate information along one axis at a time, this modification reduces cost significantly. To capture global information, we use these layers in consecutive pairs, as sketched below.
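The following minimal PyTorch sketch illustrates this factorization under illustrative assumptions (channel counts, padding choices); it is not the paper's released implementation.

```python
# One 3x3 spatial convolution factorized into a 3x1 (height-axis)
# convolution followed by a 1x3 (width-axis) convolution.
import torch
import torch.nn as nn

class AxialConv(nn.Module):
    def __init__(self, in_ch, out_ch):
        super().__init__()
        self.conv_h = nn.Conv2d(in_ch, out_ch, kernel_size=(3, 1),
                                padding=(1, 0), bias=False)  # height axis
        self.conv_w = nn.Conv2d(out_ch, out_ch, kernel_size=(1, 3),
                                padding=(0, 1), bias=False)  # width axis

    def forward(self, x):
        return self.conv_w(self.conv_h(x))

x = torch.randn(1, 64, 32, 32)        # N, C, H, W
y = AxialConv(64, 64)(x)              # spatial size preserved: 1x64x32x32
```

Stacking the two 1D layers back to back lets every output position mix information from both axes, which is why the layers are used in consecutive pairs.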
Figure 1. Residual block architectures. "bn" stands for batch normalization. (Left) ResNet basic block and (right) ResNet bottleneck block.

A simple axial architecture reduces cost but does not improve performance. The reason is that the forward information flow across the axial blocks degrades (diminishing feature reuse [15]). To address this, we add residual connections spanning the axial blocks. With both modifications, we obtain a novel, effective residual axial architecture (RAN). The effectiveness of our proposed model is demonstrated experimentally on four image classification datasets and an image super-resolution dataset. Our assessments are based on parameter counts, FLOPS counts (number of multiply-add operations), latency to process one image after training, and validation accuracy.

2. Background and Related Work

2.1. Convolutional Neural Networks

In a convolutional layer, the core building block is a convolution operation using one kernel W applied to small neighborhoods to find input correlations. For an input image X with height h, width w, and channel count d_in, the convolution operation operates on a region (a, b) ∈ X centered at pixel (i, j) with spatial extent k. The output of this operation, C_o with o = (i, j), is [26]

$$C_o = \sum_{(a,b)\in\mathcal{N}_{k\times k}(i,j)} W_{i-a,\,j-b}\, x_{a,b}, \tag{1}$$

where $\mathcal{N}_{k\times k}$ is the neighborhood of pixel (i, j) with spatial extent k, and W holds the shared weights used to calculate the output for all pixel positions (i, j).
|
123 |
+
Residual networks (ResNets) are constructed using
|
124 |
+
convolutional layers linked by additive identity connec-
|
125 |
+
tions [12]. They were introduced to address the problem
|
126 |
+
of vanishing gradients found in standard deep CNNs.
|
127 |
+
Although, the vanishing gradient problem may be ad-
|
128 |
+
dressed by using normalized inputs and normalization
|
129 |
+
layers which help to make networks till ten layers. In
|
130 |
+
this situation, when more layers were stacked, the net-
|
131 |
+
work depth increases but accuracy gets saturated and
|
132 |
+
then degrades rapidly. The degradation (of training ac-
|
133 |
+
curacy) indicates that not all systems are similarly opti-
|
134 |
+
mized. To address these problems, He et al. proposed
|
135 |
+
residual networks by adding identity mapping among
|
136 |
+
the layers [12]. As a result, the subsequent deeper lay-
|
137 |
+
ers are shared inputs from the learned shallower model.
|
138 |
+
This helps to address all of the problems.
|
139 |
+
The key architectural feature of ResNets is the resid-
|
140 |
+
ual block with identity mapping to tackle the degrada-
|
141 |
+
tion problem. Two kinds of residual blocks are used
|
142 |
+
in residual networks, the basic block and the bottleneck
|
143 |
+
block, both depicted in Figure 1. Figure 1 (left) is known
|
144 |
+
as the basic architecture of ResNet which is constructed
|
145 |
+
with two k × k convolution layers where k is the size of
|
146 |
+
the kernel and an identity shortcut connection is added
|
147 |
+
to the end of these two layers to address vanishing gradi-
|
148 |
+
ents. These operations can be expressed mathematically
|
149 |
+
as,
|
150 |
+
y = F(Ckm,kn(x, W)) + x
|
151 |
+
(2)
|
152 |
+
where F, x, y, W, and Ckm,kn represent residual func-
|
153 |
+
tion, input vector, output vector, weight parameters, and
|
154 |
+
output of two convolution layers with kernels Km and
|
155 |
+
Kn respectively. Figure 1 (right) is a bottleneck archi-
|
156 |
+
tecture that is constructed using 1 × 1, k × k, and 1 × 1
|
157 |
+
convolution layers, where the 1 × 1 layers reduce and
|
158 |
+
then increase the number of channels, and the 3×3 layer
|
159 |
+
performs feature extraction. The identity shortcuts (ex-
|
160 |
+
plained in equation 3) are very important for this block
|
161 |
+
as it leads to more efficient designs [11]. These can be
|
162 |
+
expressed mathematically as,
|
163 |
+
y = F(Ck1,km,k1(x, W)) + x
|
164 |
+
(3)
|
165 |
+
where F, x, y, W, and Ck1,km,k1 represent residual
|
166 |
+
function, input vector, output vector, weight parame-
|
167 |
+
ters, and output of three convolution layers with kernels
|
168 |
+
1 × 1, Km × Km and 1 × 1 respectively. Its perfor-
|
169 |
+
mance surpasses the learning speed, number of learning
|
170 |
+
parameters, way of layer-wise representation, difficult
|
171 |
+
optimization property, and memory mechanisms.
|
172 |
+
2.3. Wide Residual Networks
|
173 |
+
Wide ResNets [4, 33] use fewer layers compared to
|
174 |
+
standard ResNets but use high channel counts (wide ar-
|
175 |
+
chitectures) which compensate for the shallower archi-
|
176 |
+
tecture. The comparison between shallow and deep net-
|
177 |
+
works has been revealed in circuit complexity theory
|
178 |
+
|
179 |
+
X
|
180 |
+
X
|
181 |
+
3x3 conv2d
|
182 |
+
1x1 conv2d
|
183 |
+
bn
|
184 |
+
bn
|
185 |
+
relu
|
186 |
+
★ relu
|
187 |
+
3x3 conv2d
|
188 |
+
3x3 conv2d
|
189 |
+
bn
|
190 |
+
bn
|
191 |
+
relu
|
192 |
+
1x1 conv2d
|
193 |
+
relu
|
194 |
+
bn
|
195 |
+
reluwhere shallow circuits require more components than
|
196 |
+
the deeper circuit. Inspired by this observation, [11] pro-
|
197 |
+
posed deeper networks with thinner architecture where
|
198 |
+
a gradient goes through the layers.
|
199 |
+
But the problem
|
200 |
+
such networks face is that the residual block weights
|
201 |
+
do not flow through the network layers. For this, the
|
202 |
+
network may be forced to avoid learning during train-
|
203 |
+
ing. To address these issues, [33] proposed shallow but
|
204 |
+
wide network architectures and showed that widening
|
205 |
+
the residual blocks improves the performance of resid-
|
206 |
+
ual networks compared to increasing their depth. For
|
207 |
+
example, a 16-layer wide ResNet has similar accuracy
|
208 |
+
performance to a 1000-layer thinner network.
|
209 |
+
2.4. Recursive Residual Networks
|
210 |
+
Image super-resolution (SR) is the process of gen-
|
211 |
+
erating a high-resolution (HR) image from a low-
|
212 |
+
resolution (LR) image.
|
213 |
+
It is also known as single
|
214 |
+
image super-resolution (SISR). A list of convolution-
|
215 |
+
based models has shown promising results on SISR
|
216 |
+
[7, 17, 18, 28]. These 2D convolutional networks learn
|
217 |
+
a nonlinear mapping from an LR to an HR image in an
|
218 |
+
end-to-end manner. Convolution-based recursive neural
|
219 |
+
networks have been used on SISR, where recursive net-
|
220 |
+
works learn detailed and structured information about an
|
221 |
+
image. As image SR requires more image details, pool-
|
222 |
+
ing is not used in deep models for SISR. Convolution-
|
223 |
+
based SR [7] has shown that the convolution-based LR-
|
224 |
+
HR mapping significantly improves performance for
|
225 |
+
classical shallow methods. Kim et al., introduce two
|
226 |
+
deep CNNs for SR by stacking weight layers [17, 18].
|
227 |
+
Among them, [18] uses a chain structure recursive layer
|
228 |
+
along with skip-connections to control the model pa-
|
229 |
+
rameters and improve the performance. Deep SR mod-
|
230 |
+
els [17,18,23] demand large parameter counts and more
|
231 |
+
storage.
|
232 |
+
To address these issues, deep recursive residual net-
|
233 |
+
works (DRRNs) were proposed as a very deep network
|
234 |
+
structure, which achieves better performance with fewer
|
235 |
+
parameters [28]. It includes both local and global resid-
|
236 |
+
ual learning, where global residual learning (GRL) is be-
|
237 |
+
ing used in the identity branch to estimate the residual
|
238 |
+
image from the input and output of the network. GRL
|
239 |
+
might face degradation problems for deeper networks.
|
240 |
+
To handle this problem, local residual learning (LRL)
|
241 |
+
has been used which carries rich image details to deeper
|
242 |
+
layers and helps gradient flow. The DRRN also used
|
243 |
+
recursive learning of residual units to keep the model
|
244 |
+
more compact. Several recursive blocks (B) has been
|
245 |
+
stacked, followed by a CNN layer which is used to re-
|
246 |
+
construct the residual between the LR and HR images.
|
247 |
+
Each of these residual blocks decomposed into a num-
|
248 |
+
ber of residual units (U). The number of recursive block
|
249 |
+
B, and the number of residual units U are responsible
|
250 |
+
for defining network depth. The depth of DRRN d is
|
251 |
+
calculated as,
|
252 |
+
d = (1 + 2 × U) × B + 1
|
253 |
+
(4)
|
254 |
+
Recursive block definition, DRRN formulation, and the
|
255 |
+
loss function of DRRN are defined in [28].
|
256 |
+
3. Proposed Residual Axial Networks
|
257 |
+
The convolution-based residual basic and bottleneck
|
258 |
+
blocks [11, 12] have demonstrated significant perfor-
|
259 |
+
mance with the help of several state-of-the-art archi-
|
260 |
+
tectures like, ResNets [11], wide ResNets [31], scal-
|
261 |
+
ing wide ResNets [33], and deep recursive residual net-
|
262 |
+
works (DRRNs) [28] on image classification and im-
|
263 |
+
age super-resolution datasets. Although the bottleneck
|
264 |
+
residual block makes the networks thinner still the ba-
|
265 |
+
sic and bottleneck blocks are not cost-effective and/or
|
266 |
+
parameter efficient. The 2D convolutional operation of
|
267 |
+
these blocks is consuming O(N 2) resources, where N is
|
268 |
+
the flattened pixels of an image, and N = hw (for a 2D
|
269 |
+
image of height h, width w, and h = w). So the cost for
|
270 |
+
a 2D convolutional operation, for an image with height
|
271 |
+
h, and width w, is O((hw)2) = O(h2w2) = O(h4)
|
272 |
+
[13, 29]. To reduce this impractical computational cost,
|
273 |
+
we are proposing a novel architectural design, residual
|
274 |
+
axial networks (RANs).
|
275 |
+
Due to high computational expenses, we replace all
|
276 |
+
spatial 2D convolution operations (conv2D) of the resid-
|
277 |
+
ual basic blocks, and the only spatial 2D convolution op-
|
278 |
+
eration of the residual bottleneck block by using two 1D
|
279 |
+
convolutional operations. Also, each 1D convolutional
|
280 |
+
operation has a residual connection to reduce vanishing
|
281 |
+
gradients. Although this axial technique was introduced
|
282 |
+
in [13] for auto-regressive transformer models, we pro-
|
283 |
+
pose novel architectures by factorizing 2D convolution
|
284 |
+
into two consecutive 1D convolutions. Figures 2, and 3
|
285 |
+
show our novel proposed residual blocks.
|
286 |
+
For each location, o = (i, j), a local input kernel k×k
|
287 |
+
is extracted from an input image X with height h, width
|
288 |
+
w, and channel count din to serve convolutional opera-
|
289 |
+
tion. Residual units, used by [12], are defined as,
|
290 |
+
Yo = R(Xo) + F(Xo, Wo)
|
291 |
+
(5)
|
292 |
+
where, Xo and Yo are input and output for the location
|
293 |
+
o = (i, j), R(Xo) is the original input or identity map-
|
294 |
+
ping, and F is the residual function. This residual func-
|
295 |
+
tion is defined using convolutional operation for vision
|
296 |
+
tasks. The structure of this residual function depends on
|
297 |
+
the residual block, we use. Two spatial 2D convolutional
|
298 |
+
operations are used for residual basic block, and a spa-
|
299 |
+
tial (kernel k > 1) 2D convolution operation is used in
|
300 |
+
between two convolutional operations (kernel k = 1) for
|
301 |
+
|
302 |
+
Figure 2. RAN basic block used in our proposed networks. “bn” stands for batch normalization.
|
303 |
+
Figure 3. RAN bottleneck block used in our proposed networks. “bn” stands for batch normalization.
|
304 |
+
bottleneck block. These spatial 2D convolutional oper-
|
305 |
+
ations for kernel k > 1 and o = (i, j) can be defined
|
306 |
+
as [26],
|
307 |
+
Co =
|
308 |
+
�
|
309 |
+
(a,b)∈Nk×k(o)
|
310 |
+
Wi−a,j−b, xa,b
|
311 |
+
(6)
|
312 |
+
where, Nk ∈ Rk×k×din is the neighborhood of pixel
|
313 |
+
(i, j) with the spatial square region k × k and W ∈
|
314 |
+
Rk×k×dout×din is the shared weights that are for cal-
|
315 |
+
culating output for all pixel positions centered by (i, j).
|
316 |
+
The computational cost is O(hwk2) which is high.
|
317 |
+
To reduce this computation cost and make parame-
|
318 |
+
ter efficient architecture, we propose to adopt the axial
|
319 |
+
concept and replace 2D convolution using two 1D con-
|
320 |
+
volutions with residual connections. These two 1D con-
|
321 |
+
volutions are performing convolution along the height
|
322 |
+
axis and the width axis. The 1D convolution along the
|
323 |
+
height axis is defined as follows.
|
324 |
+
Ch =
|
325 |
+
�
|
326 |
+
(a,b)∈Nk×1(i,j)
|
327 |
+
Wi−a,j−b, xa,b
|
328 |
+
(7)
|
329 |
+
where, Nk ∈ Rk×1×din is the neighborhood of pixel
|
330 |
+
(i, j) with spatial extent k × 1 and W ∈ Rk×1×dout×din
|
331 |
+
is the shared weights that are for calculating output for
|
332 |
+
all pixel positions (i, j). And, for width axis is as fol-
|
333 |
+
lows.
|
334 |
+
Cw =
|
335 |
+
�
|
336 |
+
(a,b)∈N1×k(i,j)
|
337 |
+
Wi−a,j−b, xa,b
|
338 |
+
(8)
|
339 |
+
where, Nk ∈ R1×k×din is the neighborhood of pixel
|
340 |
+
(i, j) with spatial extent 1 × k and W ∈ R1×k×dout×din
|
341 |
+
is the shared weights that are for calculating output for
|
342 |
+
all pixel positions (i, j).
|
343 |
+
To construct our basic and bottleneck blocks, we re-
|
344 |
+
place each 2D convolution layer from the original resid-
|
345 |
+
ual blocks with a pair of consecutive 1D convolution
|
346 |
+
layers. When we did this but omitted a residual connec-
|
347 |
+
tion, the network faced the vanishing gradient problem.
|
348 |
+
To handle this, we added a residual connection along
|
349 |
+
each 1D convolution operation. Each 2D convolution in
|
350 |
+
Equation 6 is equivalent to our proposed method defined
|
351 |
+
as,
|
352 |
+
Yh = Ch(Wh, Xo) + Xo
|
353 |
+
(9)
|
354 |
+
Yo = Cw(Ww, Yh) + Yh
|
355 |
+
(10)
|
356 |
+
where, Ch, and Cw are the height and width outputs of
|
357 |
+
Equations 7 and 8, respectively, Wh, and Ww is the con-
|
358 |
+
volutional weights for height, and width axis 1D convo-
|
359 |
+
lutional operations, respectively. Equation 10 describes
|
360 |
+
the residual basic and bottleneck blocks. As two 1D op-
|
361 |
+
erations equal one 2D operation, the use of these two
|
362 |
+
|
363 |
+
....
|
364 |
+
1x3 Conv1D
|
365 |
+
3x1 Conv1D
|
366 |
+
Height-Axis
|
367 |
+
Width-Axis
|
368 |
+
1x3 Conv1D
|
369 |
+
Width-Axis
|
370 |
+
I.
|
371 |
+
relu
|
372 |
+
X D
|
373 |
+
D
|
374 |
+
.S
|
375 |
+
Conv1D
|
376 |
+
Vidth-Axis
|
377 |
+
D
|
378 |
+
Conv1l
|
379 |
+
Axi
|
380 |
+
2
|
381 |
+
2
|
382 |
+
E
|
383 |
+
Conv
|
384 |
+
bn
|
385 |
+
n
|
386 |
+
0
|
387 |
+
C
|
388 |
+
c
|
389 |
+
3
|
390 |
+
X
|
391 |
+
X
|
392 |
+
H
|
393 |
+
X
|
394 |
+
X
|
395 |
+
3layers does not increase the layer count. The RAN ba-
|
396 |
+
sic and bottleneck blocks are shown in Figures 2 and 3.
|
397 |
+
These blocks are used to construct our proposed residual
|
398 |
+
axial networks (RANs). The output Yo from Equation 10
|
399 |
+
is applied to other 2D convolution-based networks, for
|
400 |
+
example, wide residual networks (to make our proposed
|
401 |
+
wide RANs) and deep recursive residual networks (to
|
402 |
+
make RARNets), to check the effectiveness of our pro-
|
403 |
+
posed method.
4. Experimental Analysis

We present experimental results on four image classification datasets and one image super-resolution dataset. Our experiments evaluate the proposed residual axial networks, the original ResNets, the wide ResNets, wide RANs, the deep recursive residual networks (DRRNs), and RARNets. We compare our proposed networks' performance with the corresponding original ResNets, as these original networks used 2D spatial convolutional layers. Our comparisons use parameter counts, FLOPS, latency, and validation performance.

4.1. Method: Residual Networks

To explore scalability, we compare our proposed RANs and baseline models on four datasets: the CIFAR-10 and CIFAR-100 benchmarks [19], Street View House Numbers (SVHN) [24], and Tiny ImageNet [20]. The CIFAR benchmarks have 10 and 100 distinct classes, respectively, and 60,000 color images of size 32 × 32 (split into 50,000 training and 10,000 testing images). We perform data normalization using the per-channel mean and standard deviation. In preprocessing, we apply a horizontal flip and randomly crop after padding with four pixels on each side of the image. The SVHN dataset contains 600,000 images of size 32 × 32 with ten classes, and the Tiny ImageNet dataset contains 110,000 images of 200 distinct classes downsized to 64 × 64 color images. Our only preprocessing for these two datasets is mean/std normalization.

All the models (baselines and proposed RANs) were trained using similar architectures (the same hyperparameters and the same number of output channels). As our main concern was to reduce the parameter count of the bottleneck residual block, we implemented all network architectures, baseline and proposed, using only bottleneck blocks. The numbers of output channels of the bottleneck groups are 120, 240, 480, and 960 for all networks. This experiment analyzes 26, 35, 50, 101, and 152-layer architectures with the bottleneck block multipliers “[1, 2, 4, 1]”, “[2, 3, 4, 2]”, “[3, 4, 6, 3]”, “[3, 4, 23, 3]”, and “[3, 8, 36, 3]”, respectively. All models were run using the stochastic gradient descent optimizer, with the learning rate warmed up linearly from zero to 0.1 over the first 10 epochs and then decayed with a cosine schedule from epochs 11 to 150. All models were trained with a batch size of 128 on all datasets, except the 101- and 152-layer architectures on Tiny ImageNet, for which we used a batch size of 64.
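As a minimal sketch of the learning-rate schedule just described (linear warm-up to 0.1 over 10 epochs, then cosine decay through epoch 150); the assumption that the cosine phase decays to zero is ours:

```python
import math

def lr_at_epoch(epoch, base_lr=0.1, warmup_epochs=10, total_epochs=150):
    """Linear warm-up from 0 to base_lr, then cosine decay to 0."""
    if epoch < warmup_epochs:
        return base_lr * (epoch + 1) / warmup_epochs
    progress = (epoch - warmup_epochs) / (total_epochs - warmup_epochs)
    return 0.5 * base_lr * (1.0 + math.cos(math.pi * progress))

# e.g. lr_at_epoch(5) -> 0.06 during warm-up, lr_at_epoch(80) -> 0.05 mid-decay
```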
4.2. Results: Residual Networks

Table 1 summarizes the classification results of the original ResNets and our proposed RANs on the four datasets. We tested shallow and deeper networks by implementing 26, 35, 50, 101, and 152-layer architectures. These architectures compare performance to check the effectiveness of our proposed methods for shallow and deep networks. Our proposed method is compared with the original ResNets in terms of parameter count, FLOPS count (number of multiply-add operations), inference time or latency (time used to test one image after training), and the percentage accuracy of validation results on the four datasets.

The 26, 35, 50, 101, and 152-layer architectures need 48.6%, 46.5%, 44.8%, 43.2%, and 42.6% fewer trainable parameters, respectively, than the corresponding baseline networks. In addition to the parameter reduction, our proposed method requires 15 to 36 percent fewer FLOPS for all analyzed architectures. The validation performance improvement is also noticeable for all datasets. The latency to process one image after training is comparatively high for our proposed models, as the RANs apply two convolution layers sequentially. The results also show that the deeper networks perform better than the shallow networks, demonstrating “the deeper, the better” in classification.
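The reported reductions can be sanity-checked directly against the rounded entries of Table 1; for example, for the 26-layer CIFAR-10 models:

```python
# Parameter reduction computed from Table 1 (CIFAR-10, 26 layers):
resnet_params, ran_params = 40.9e6, 21.0e6
reduction = 1 - ran_params / resnet_params
print(f"{reduction:.1%}")  # -> 48.7%, consistent with the reported 48.6%
```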
4.3. Method: Wide Residual Networks

The previous experiment did not assess wide ResNets. To assess the effect of the widening factor on our proposed RANs, we increase the width of our RANs by scaling the number of output channels for shallow networks, as in [33]. Like the original wide residual networks (WRNs) [33], we analyzed our proposed 26-layer bottleneck-block RANs with widening factors k = 2, 4, 6, 8, and 10. We multiplied the number of output channels of the RANs by k to obtain wide RANs. We performed training with the same optimizer and hyperparameters used in Section 4.1.
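Concretely, widening only rescales the per-stage channel counts; a one-line sketch, using the bottleneck-group widths from Section 4.1 as the base:

```python
def widen_channels(base_channels, k):
    """Scale each stage's output channel count by the widening factor k."""
    return [c * k for c in base_channels]

print(widen_channels([120, 240, 480, 960], k=2))  # -> [240, 480, 960, 1920]
```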
4.4. Results: Wide Residual Networks

Table 1 shows “the deeper the better” in vision classification for our proposed methods. To compare our proposed RANs with the original wide ResNets (WRNs), we analyze our proposed method for different widening factors. Table 2 shows an overall comparison among the original WRN-28-10 (28 layers with a widening factor of 10) and our proposed 26-layer networks with different widening factors (k = 2, 4, 6, 8, and 10). Our proposed wide RANs show significant accuracy improvement over the original WRN-28-10. This table also demonstrates “the wider the better” for our proposed wide RANs.

Dataset        Model          Layers   Params    FLOPs   Latency   Accuracy
CIFAR-10       ResNet [11]      26     40.9M     0.66G   0.66ms     94.68
               RAN (Ours)       26     21M       0.56G   0.73ms     96.08
               ResNet [11]      35     57.8M     0.86G   0.82ms     94.95
               RAN (Ours)       35     30.9M     0.68G   0.91ms     96.15
               ResNet [11]      50     82.5M     1.18G   1.02ms     95.08
               RAN (Ours)       50     45.5M     0.87G   1.17ms     96.25
               ResNet [11]     101     149.2M    2.29G   1.68ms     95.36
               RAN (Ours)      101     84.7M     1.52G   1.86ms     96.27
               ResNet [11]     152     204.1M    3.41G   2.39ms     95.36
               RAN (Ours)      152     117.1M    2.18G   2.55ms     96.37
CIFAR-100      ResNet [11]      26     41.2M     0.66G   0.66ms     78.21
               RAN (Ours)       26     21.1M     0.56G   0.74ms     79.66
               ResNet [11]      35     58.1M     0.86G   0.80ms     78.72
               RAN (Ours)       35     31.1M     0.68G   0.91ms     80.38
               ResNet [11]      50     82.9M     1.18G   1.11ms     78.95
               RAN (Ours)       50     45.7M     0.87G   1.17ms     80.84
               ResNet [11]     101     149.5M    2.29G   1.72ms     78.80
               RAN (Ours)      101     84.9M     1.52G   1.86ms     80.88
               ResNet [11]     152     204.5M    3.41G   2.36ms     79.85
               RAN (Ours)      152     117.2M    2.18G   2.55ms     80.94
SVHN           ResNet [11]      26     40.9M     0.66G   0.64ms     96.04
               RAN (Ours)       26     21M       0.56G   0.73ms     97.60
               ResNet [11]      35     57.8M     0.86G   0.79ms     95.74
               RAN (Ours)       35     30.9M     0.68G   0.90ms     97.50
               ResNet [11]      50     82.5M     1.18G   1.05ms     95.76
               RAN (Ours)       50     45.5M     0.87G   1.11ms     97.32
               ResNet [11]     101     149.2M    2.29G   1.64ms     96.29
               RAN (Ours)      101     84.7M     1.52G   1.80ms     97.29
               ResNet [11]     152     204.1M    3.41G   2.28ms     96.35
               RAN (Ours)      152     117.1M    2.18G   2.5ms      97.38
ImageNet-Tiny  ResNet [11]      26     41.6M     0.66G   2.31ms     57.21
               RAN (Ours)       26     21.3M     0.56G   2.58ms     62.28
               ResNet [11]      35     58.5M     0.86G   2.85ms     57.80
               RAN (Ours)       35     31.3M     0.68G   3.0ms      59.31
               ResNet [11]      50     82.6M     1.18G   3.75ms     59.06
               RAN (Ours)       50     45.8M     0.87G   4.02ms     62.40
               ResNet [11]     101     149.3M    2.29G   6.86ms     60.62
               RAN (Ours)      101     85.1M     1.52G   7.19ms     64.18
               ResNet [11]     152     204.2M    3.41G   9.29ms     61.57
               RAN (Ours)      152     117.4M    2.18G   9.72ms     66.16

Table 1. Image classification performance on the CIFAR benchmarks, SVHN, and Tiny ImageNet datasets for 26, 35, 50, 101, and 152-layer architectures.

Dataset      Model Name          Accuracy
CIFAR-10     WRN-28-10 [33]        94.68
             RAN-26-2 (Ours)       96.32
             RAN-26-4 (Ours)       96.68
             RAN-26-6 (Ours)       96.77
             RAN-26-8 (Ours)       96.83
             RAN-26-10 (Ours)      96.87
CIFAR-100    WRN-28-10 [33]        79.57
             RAN-26-2 (Ours)       83.54
             RAN-26-4 (Ours)       83.75
             RAN-26-6 (Ours)       83.78
             RAN-26-8 (Ours)       83.82
             RAN-26-10 (Ours)      83.92

Table 2. Image classification performance comparison on the CIFAR benchmarks for 26-layer RAN architectures with different widening factors.
4.5. Method: Recursive Networks

This experiment compares the cost and performance of our novel RARNet with the DRRN on super-resolution tasks. The RARNet is built by replacing the residual unit U with a RAN layer described in Equation 10. These modifications form a new architecture, the recursive axial residual network (RARNet), whose depth d is defined as

    d = (1 + U_RAN) × B + 1.    (11)

As two 1D layers are equivalent to one 2D layer and we replace each residual unit by a RAN unit (see Equation 10), we rewrite Equation 4 as Equation 11 by removing the multiplier for the residual unit. The proposed RARNet with four RB blocks is shown on the left in Figure 4; an RB block is expanded on the right.

Figure 4. Recursive axial residual network (RARNet) architecture with B = 4 and U = 3. Here, “RB” and RAN refer to a recursive block and a residual axial block, respectively.
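Equation 11 is simple enough to encode directly; a small helper (the function and argument names are ours) makes the depth accounting explicit:

```python
def rarnet_depth(num_blocks, ran_units_per_block):
    """Depth d of a RARNet per Equation 11: d = (1 + U_RAN) * B + 1.
    Each RAN unit counts as one layer, since its two 1D convolutions
    together replace a single 2D convolution."""
    return (1 + ran_units_per_block) * num_blocks + 1

print(rarnet_depth(num_blocks=4, ran_units_per_block=3))  # Figure 4 setting -> 17
```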
We trained our proposed RARNet on the 291-image dataset [32] and tested it on the Set5 dataset [3]. We also use different scales (×2, ×3, and ×4) for the training and testing images. We used data augmentation, training hyperparameters, and implementation details similar to [28].

4.6. Results: Recursive Networks

Table 3 shows the Peak Signal-to-Noise Ratio (PSNR) results of several CNN models, including DRRN and our proposed RARNet, on the Set5 dataset. The comparison between DRRN and RARNet is our main focus, as it directly indicates the effectiveness of using our proposed RAN block. DRRN19 and DRRN125 are constructed using B = 1, U = 9 and B = 1, U = 25, respectively. For a fair comparison, we construct similar architectures, RARNet19 (B = 1, U_RAN = 9) and RARNet125 (B = 1, U_RAN = 25). Our proposed models outperform all CNN models in Table 3 on the Set5 dataset for all scaling factors. As we aim to propose a parameter-efficient architecture, the parameter comparison is essential along with the testing performance. Our proposed model for B = 1 and U_RAN = 9 takes 213,254 parameters, compared to 297,216 parameters for DRRN (B = 1 and U = 9).
RARNet, which is constructed using RAN blocks, reduces the trainable parameters by 28.2% compared to the DRRN.

Dataset   Scale   SRCNN [7]   VDSR [17]   DRCN [18]   DRRN19   DRRN125   RARNet19   RARNet125
Set5       ×2      36.66       37.53       37.63       37.66     37.74      37.73      37.84
           ×3      32.75       33.66       33.82       33.93     34.03      33.99      34.11
           ×4      30.48       31.35       31.53       31.58     31.68      31.63      31.84

Table 3. Benchmark testing PSNR results for scaling factors ×2, ×3, and ×4 on the Set5 dataset.
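A quick check of the parameter comparison above, taking the two counts as reported:

```python
# DRRN (B=1, U=9) vs. RARNet (B=1, U_RAN=9) parameter counts:
reduction = 1 - 213_254 / 297_216
print(f"{reduction:.1%}")  # -> 28.2%, matching the reported reduction
```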
5. Discussion and Conclusions

This work introduces a new residual block that can be used as a replacement for the ResNet basic and bottleneck blocks. This RAN block replaces the 2D convolution of the original ResNet blocks with two sequential 1D convolutions along with a residual connection. These modifications reduce trainable parameters as well as improve validation performance on vision classification. The latency of our proposed model is comparatively high, but the proposed model's performance and parameter reduction outweigh this limitation. We also checked this proposed block for widened ResNets and showed that the wide RANs obtain better accuracy than the WRNs. We also checked the effectiveness of RANs on the SISR task. Specifically, we applied our proposed RAN block to an image restoration dataset and found that our proposed recursive axial ResNets (RARNets) improve image resolution and reduce trainable parameters more than the other CNN-based super-resolution models. Extensive experiments and analysis show that RANs can be deep and wide, and that they are parameter-efficient and superior models for image classification and SISR. We have shown that our proposed model is a viable replacement for ResNets on the tasks that were tested. Further work is required to determine the range of applications for which RANs may offer advantages.
References

[1] Serge Belongie, Michael Wilber, and Andreas Veit. Residual networks behave like ensembles of relatively shallow networks. 2016. 1
[2] Yoshua Bengio, Aaron Courville, and Pascal Vincent. Representation learning: A review and new perspectives. IEEE Transactions on Pattern Analysis and Machine Intelligence, 35(8):1798–1828, 2013. 1

[3] Marco Bevilacqua, Aline Roumy, Christine Guillemot, and Marie Line Alberi-Morel. Low-complexity single-image super-resolution based on nonnegative neighbor embedding. 2012. 7

[4] Liang-Chieh Chen, Huiyu Wang, and Siyuan Qiao. Scaling wide residual networks for panoptic segmentation. arXiv preprint arXiv:2011.11675, 2020. 1, 2

[5] Tianqi Chen, Ian Goodfellow, and Jonathon Shlens. Net2Net: Accelerating learning via knowledge transfer. arXiv preprint arXiv:1511.05641, 2015. 1

[6] Jifeng Dai, Yi Li, Kaiming He, and Jian Sun. R-FCN: Object detection via region-based fully convolutional networks. Advances in Neural Information Processing Systems, 29, 2016. 1

[7] Chao Dong, Chen Change Loy, Kaiming He, and Xiaoou Tang. Image super-resolution using deep convolutional networks. IEEE Transactions on Pattern Analysis and Machine Intelligence, 38(2):295–307, 2015. 3, 8

[8] Shang-Hua Gao, Ming-Ming Cheng, Kai Zhao, Xin-Yu Zhang, Ming-Hsuan Yang, and Philip Torr. Res2Net: A new multi-scale backbone architecture. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43(2):652–662, 2019. 1

[9] Xavier Glorot and Yoshua Bengio. Understanding the difficulty of training deep feedforward neural networks. In Proceedings of the Thirteenth International Conference on Artificial Intelligence and Statistics, pages 249–256. JMLR Workshop and Conference Proceedings, 2010. 1

[10] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Delving deep into rectifiers: Surpassing human-level performance on ImageNet classification. In Proceedings of the IEEE International Conference on Computer Vision, pages 1026–1034, 2015. 1

[11] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 770–778, 2016. 1, 2, 3, 6

[12] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Identity mappings in deep residual networks. In European Conference on Computer Vision, pages 630–645. Springer, 2016. 1, 2, 3

[13] Jonathan Ho, Nal Kalchbrenner, Dirk Weissenborn, and Tim Salimans. Axial attention in multidimensional transformers. arXiv preprint arXiv:1912.12180, 2019. 3

[14] Jie Hu, Li Shen, and Gang Sun. Squeeze-and-excitation networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7132–7141, 2018. 1

[15] Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q. Weinberger. Deep networks with stochastic depth. In Bastian Leibe, Jiri Matas, Nicu Sebe, and Max Welling, editors, Computer Vision – ECCV 2016, pages 646–661, Cham, 2016. Springer International Publishing. 2

[16] Sergey Ioffe and Christian Szegedy. Batch normalization: Accelerating deep network training by reducing internal covariate shift. In International Conference on Machine Learning, pages 448–456. PMLR, 2015. 1

[17] Jiwon Kim, Jung Kwon Lee, and Kyoung Mu Lee. Accurate image super-resolution using very deep convolutional networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1646–1654, 2016. 1, 3, 8

[18] Jiwon Kim, Jung Kwon Lee, and Kyoung Mu Lee. Deeply-recursive convolutional network for image super-resolution. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1637–1645, 2016. 1, 3, 8

[19] Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009. 5

[20] Ya Le and Xuan S. Yang. Tiny ImageNet visual recognition challenge. 2015. 5

[21] Xiu Li and Zuoying Cui. Deep residual networks for plankton classification. In OCEANS 2016 MTS/IEEE Monterey, pages 1–4, 2016. 1

[22] Tsung-Yi Lin, Piotr Dollár, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Belongie. Feature pyramid networks for object detection. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2117–2125, 2017. 1

[23] Xiaojiao Mao, Chunhua Shen, and Yu-Bin Yang. Image restoration using very deep convolutional encoder-decoder networks with symmetric skip connections. Advances in Neural Information Processing Systems, 29, 2016. 3

[24] Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco, Bo Wu, and Andrew Y. Ng. Reading digits in natural images with unsupervised feature learning. 2011. 5

[25] Siyuan Qiao, Huiyu Wang, Chenxi Liu, Wei Shen, and Alan Yuille. Micro-batch training with batch-channel normalization and weight standardization. arXiv preprint arXiv:1903.10520, 2019. 1

[26] Prajit Ramachandran, Niki Parmar, Ashish Vaswani, Irwan Bello, Anselm Levskaya, and Jonathon Shlens. Stand-alone self-attention in vision models. arXiv preprint arXiv:1906.05909, 2019. 2, 4

[27] Ilya Sutskever, James Martens, George Dahl, and Geoffrey Hinton. On the importance of initialization and momentum in deep learning. In International Conference on Machine Learning, pages 1139–1147. PMLR, 2013. 1

[28] Ying Tai, Jian Yang, and Xiaoming Liu. Image super-resolution via deep recursive residual network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3147–3155, 2017. 1, 3, 7

[29] Huiyu Wang, Yukun Zhu, Bradley Green, Hartwig Adam, Alan Yuille, and Liang-Chieh Chen. Axial-DeepLab: Stand-alone axial-attention for panoptic segmentation. In European Conference on Computer Vision, pages 108–126. Springer, 2020. 3

[30] Yuxin Wu and Kaiming He. Group normalization. In Proceedings of the European Conference on Computer Vision (ECCV), pages 3–19, 2018. 1

[31] Zifeng Wu, Chunhua Shen, and Anton Van Den Hengel. Wider or deeper: Revisiting the ResNet model for visual recognition. Pattern Recognition, 90:119–133, 2019. 1, 3

[32] Jianchao Yang, John Wright, Thomas S. Huang, and Yi Ma. Image super-resolution via sparse representation. IEEE Transactions on Image Processing, 19(11):2861–2873, 2010. 7

[33] Sergey Zagoruyko and Nikos Komodakis. Wide residual networks. arXiv preprint arXiv:1605.07146, 2016. 1, 2, 3, 5, 7
3NE3T4oBgHgl3EQfoQom/content/tmp_files/load_file.txt
ADDED
The diff for this file is too large to render.
See raw diff
4dAzT4oBgHgl3EQfuv2o/content/2301.01696v1.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:385b6ab986485c74f7a331c13ae5b2617a1cff457b7daeaa7fe679b7d74e6439
size 1316514
4dAzT4oBgHgl3EQfuv2o/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c5398059a290b5f17e26040908607a4830ab1c11df0663c84292d9a4287f6e33
size 353784
5NE4T4oBgHgl3EQfBQty/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e99c2eedeaba5eabd8abcaca8e1a215a8d692db7f15ce5bbea7f9b45cb92b301
size 138488
99AzT4oBgHgl3EQf_P4t/content/2301.01944v1.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:799a3af2a9c0381442f98ac482a7a6961f8916cbbfb6e8440395d906e0f5e7b3
size 1485842
99AzT4oBgHgl3EQf_P4t/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b75388a2d8276d8f67e689eb7a084440149d9a52cc2ae09acdae658b0e1b07da
size 332426
9NFST4oBgHgl3EQfaziV/content/tmp_files/load_file.txt
ADDED
The diff for this file is too large to render.
See raw diff
A9AyT4oBgHgl3EQfRvd3/content/2301.00072v1.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:21d0fddaa7cc80733690766b7ee0c9e8ce487d1f4f57374dc909985d767dd683
size 1468140
A9AyT4oBgHgl3EQfRvd3/vector_store/index.faiss
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a588373e0c70eccf27aa2e166d5b319b0ee7256663bb390b78c21058a37fcfa8
size 4915245
A9AyT4oBgHgl3EQfRvd3/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:86b11c9855600226696bea4f38fa60a776e77df86e5bf7757a15db205d7c02d9
size 197048
AdE2T4oBgHgl3EQfRQcf/content/2301.03778v1.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c193a78ca62e816c5dd0eada8a4c24ba608b6931f38aca740235c53c72bec300
size 516482
AdE2T4oBgHgl3EQfRQcf/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:feff0c7f76c17afc2559f1a1364484fd773fa5e02e1c2e81d35c8e5aa5c33e8d
size 98656
AtAzT4oBgHgl3EQf__8r/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1ed8159929f9780ef64d657108dd8876575b0feeaebba6c5a831b95e9341266a
size 135517
AtE3T4oBgHgl3EQfsgvt/content/tmp_files/2301.04669v1.pdf.txt
ADDED
The diff for this file is too large to render.
See raw diff
AtE3T4oBgHgl3EQfsgvt/content/tmp_files/load_file.txt
ADDED
The diff for this file is too large to render.
See raw diff
BNE4T4oBgHgl3EQf5A7u/content/tmp_files/2301.05320v1.pdf.txt
ADDED
The diff for this file is too large to render.
See raw diff
BNE4T4oBgHgl3EQf5A7u/content/tmp_files/load_file.txt
ADDED
The diff for this file is too large to render.
See raw diff
BdE5T4oBgHgl3EQfTA_5/content/2301.05534v1.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cd08b91595ba9d76064378a9b659cc902bcf16b78a197e1c528b984016fd7bcd
size 521263
BdE5T4oBgHgl3EQfTA_5/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b692fb1568247bb05f962a31fd6f9f82040bdc8c87b1e1cb7e5eba29aae6b0ae
size 340263
BtE2T4oBgHgl3EQf8wk1/content/tmp_files/2301.04221v1.pdf.txt
ADDED
@@ -0,0 +1,445 @@
Citation
R. Benkert, O. J. Aribido, and G. AlRegib, “Explaining Deep Models Through Forgettable Learning Dynamics,” in IEEE International Conference on Image Processing (ICIP), Anchorage, AK, Sep. 19-22 2021

Review
Date of acceptance: June 2021

Bib
@ARTICLE{benkert2021_ICIP,
  author={R. Benkert, O. J. Aribido, and G. AlRegib},
  journal={IEEE International Conference on Image Processing},
  title={Explaining Deep Models Through Forgettable Learning Dynamics},
  year={2021}
}

Copyright
©2022 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.

Contact
rbenkert3@gatech.edu OR alregib@gatech.edu
http://ghassanalregib.info/

arXiv:2301.04221v1 [cs.CV] 10 Jan 2023
EXPLAINING DEEP MODELS THROUGH FORGETTABLE LEARNING DYNAMICS

Ryan Benkert, Oluwaseun Joseph Aribido and Ghassan AlRegib
School of Electrical and Computer Engineering
Georgia Institute of Technology,
Atlanta, GA, 30332-0250, USA
{rbenkert3, oja, alregib}@gatech.edu

ABSTRACT

Even though deep neural networks have shown tremendous success in countless applications, explaining model behaviour or predictions is an open research problem. In this paper, we address this issue by employing a simple yet effective method that analyses the learning dynamics of deep neural networks in semantic segmentation tasks. Specifically, we visualize the learning behaviour during training by tracking how often samples are learned and forgotten in subsequent training epochs. This further allows us to derive important information about the proximity to the class decision boundary and identify regions that pose a particular challenge to the model. Inspired by this phenomenon, we present a novel segmentation method that actively uses this information to alter the data representation within the model by increasing the variety of difficult regions. Finally, we show that our method consistently reduces the amount of regions that are forgotten frequently. We further evaluate our method in light of the segmentation performance.

Index Terms— Example Forgetting, Interpretability, Support Vectors, Semantic Segmentation

1. INTRODUCTION

Over the last decade, deep learning has had an impact on nearly every sector. It has paved the way for scientific breakthroughs in areas ranging from image recognition to complex medical diagnostics. The success of deep neural models lies in their ability to learn complex non-linear functions and estimate distributions of high-dimensional data. In addition, open-source deep learning libraries enable fast large-scale deployment, making state-of-the-art algorithms available for countless applications. A central component of neural networks is how well they are capable of representing the target data. Well-designed models can capture unique representations of the data and “learn” a function with a small error margin. In contrast, poor representations are often inconsistent and can produce semantically incorrect predictions. Therefore, understanding how the model represents and interacts with the data remains a very challenging and highly relevant research problem. One application where this behaviour is especially important is deep learning models for computational seismic interpretation. In seismic, there is limited open-source annotated data due to the high cost associated with data acquisition and expert annotation. For this reason, architectures designed for large computer vision applications over-fit on limited annotated seismic data and result in poor generalization capabilities. Due to the high relevance in this field, we present our method using the F3 block dataset ([1]), where several classes are underrepresented. Nevertheless, the work is applicable to a wide range of 2D data.

In this paper, we view neural networks in the context of their learning dynamics. Specifically, neural networks do not learn continually but forget samples over time. One branch of research investigates the forgotten information when a model is trained on one task but fine-tuned on another. In the literature, this is often referred to as catastrophic forgetting ([2, 3]). In contrast, [4] view the dynamics within a single data distribution and track the frequency with which information is forgotten during training. In this paper, we build upon this intuition and visualize frequently forgotten regions in a generalized segmentation framework. Similar to uncertainty works with Bayesian inference ([5]) or gradient-based explanations ([6, 7, 8]), we can identify difficult regions and explain segmentation predictions. In contrast to other explainability techniques, frequently forgotten regions contain valuable information about the position within the representation space. Specifically, frequently forgotten regions are closer to the decision boundary and pose a threat to the generalization performance. Based on these findings, we engineer a method that identifies challenging pixels and generates new samples that actively influence the representation mapping. In Fig. 1 we show a toy example of our method. Based on the identified support vectors (circled blue disks), we generate new samples (green) that actively shift the decision boundary (black line) to reduce the amount of support vectors for a specific class. In contrast to traditional data augmentation ([9]), our method is data-driven and consistently reduces support vectors within the model. The following are our contributions: First, we visualize difficult regions in the data by analyzing the learning dynamics during training. Second, we develop an augmentation method that reduces prone regions by actively shifting the decision boundary. Lastly, we compare our technique to popular augmentation techniques in the literature.

Fig. 1. Intuition of our support vector augmentation method.
2. METHODOLOGY

Our goal is to quantify the learning behaviour during image segmentation by analysing the frequency of sample forgetting. In this section, we formally define when a sample is forgotten and how this relates to the proximity of samples to the decision boundary. Furthermore, we exploit these dynamics to actively shift the decision boundary in our model. Specifically, we identify support vectors in our training images and increase the variety through style transfer.

2.1. Forgetting Events

Intuitively, a sample is forgotten if it was classified correctly in a previous epoch and misclassified in the current epoch. More formally, for image I with (pixel, annotation) tuples (x_i, y_i), we define the accuracy of each pixel at an epoch t as

    acc_i^t = 1_{\tilde{y}_i^t = y_i}.    (1)

Here, 1_{\tilde{y}_i^t = y_i} refers to a binary variable indicating the correctness of the classified pixel in image I. With this definition, we say a pixel was forgotten at epoch t + 1 if the accuracy at t + 1 is strictly smaller than the accuracy at epoch t:

    f_i^t = 1(acc_i^{t+1} < acc_i^t) ∈ {0, 1}.    (2)

Following [4], we define the binary event f_i^t as a forgetting event at epoch t. Since our application is a segmentation setting, we further visualize forgetting events in the spatial domain. Specifically, we count the number of forgetting events occurring at each pixel i and display them in a heat map. Mathematically, the heat map L ∈ N_0^{M×N} is the sum over all forgetting events f_i^t that occurred in the time frame T:

    L_i = \sum_{t=0}^{T} f_i^t.    (3)
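A minimal sketch of Equations 1-3 as an epoch-by-epoch update, assuming boolean per-pixel correctness masks are available; the array and function names are ours:

```python
import numpy as np

def update_forgetting_heat_map(heat_map, prev_correct, curr_correct):
    """Add one epoch's forgetting events (Eq. 2) to the heat map L (Eq. 3).

    heat_map:     (M, N) integer array of accumulated forgetting events.
    prev_correct: (M, N) boolean mask, per-pixel correctness at epoch t (Eq. 1).
    curr_correct: (M, N) boolean mask, per-pixel correctness at epoch t + 1.
    """
    forgotten = prev_correct & ~curr_correct  # correct at t, wrong at t + 1
    return heat_map + forgotten.astype(np.int64)
```

Accumulating this update over all training epochs yields the per-pixel heat maps visualized in Fig. 2.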
Fig. 2. An example of a forgetting event heat map as well as its corresponding image and annotation. Pixels close to the decision boundary are highlighted in different shades of red, whereas pixels deep within the class manifold are dark blue. Note that several classes (e.g. the orange class “scruff”) are underrepresented.

For better illustration, we present an example of a heat map in Fig. 2. Areas that were forgotten frequently are highlighted in shades of red, in contrast to pixels that were forgotten rarely (blue). Similar to [4], we can broadly classify the pixels into two groups: The first group consists of the pixels that were never forgotten or forgotten only rarely (e.g. the light blue class in the center of Fig. 2). Since every epoch represents a model update, we conclude that these pixels are never or only rarely shifted outside of the class manifold in the feature space. In contrast, the second group consists of pixels forgotten more frequently (e.g. the class boundaries in Fig. 2). Specifically, this means that several model updates shifted these pixels over the decision boundary during training, mapping them closer to the decision boundary than unforgettable pixels. Similar to [4], we argue that these pixels play a similar role to support vectors in maximal margin classifiers. In particular, we will show the importance of forgetting events in analyzing model predictions.
2.2. Support Vector Augmentation

As we have seen in Section 2.1, forgetting events are a useful metric to quantify the sample position in the representation space. To be precise, forgetting events provide information about the proximity of samples to the decision boundary in a training interval T. In this section, we will exploit this information to increase the variety of forgettable pixels. In the seismic application, we achieve this through style transfer models ([10, 11]). Specifically, we transfer class-specific visual features from a source image to a target image without changing the structure or characteristics of neighboring classes. We target specific classes with a high forgetting event density and transfer their characteristics to other sections without affecting the geologic properties of the seismic images. An example of a style transfer is presented in Fig. 3. Here, we show the target for the transfer, the resulting transfer image (second column from the left), the target annotation, and the style source with its corresponding label. The image on the far right of Fig. 3 shows the difference between the transfer images of subsequent batches with different style sources. In this example, we transfer the visual features of class “scruff” (orange) from the style source to the target image. Moreover, switching the source image largely affects the target class (difference image in Fig. 3), which demonstrates the desired functionality of our algorithm.

Fig. 3. Example of a feature transfer within two seismic images.

Our method consists of a segmentation model, a transfer model, and a data selection step (Fig. 4). First, our method trains the segmentation model on the training data and produces a forgetting event heat map for every validation image in the training volume. In principle, heat maps could be produced for the entire training set, but this is computationally inefficient. In our implementation, the segmentation architecture is based on the deeplab-v3 architecture by [12] with a resnet-18 ([13]) backbone. Our choice is based on empirical evaluations of performance and computational efficiency.

In the next step of our workflow, we calculate the forgetting event density within each class of a heat map. Specifically, we sum all forgetting events f_i with i ∈ c_k within class c_k of a heat map and divide by the number of pixels of class c_k in the image. This metric allows us to rank each heat map according to its density with regard to an arbitrary class in the dataset.
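This per-class density is straightforward to express in code; a sketch under the same assumptions as above (the names are ours):

```python
import numpy as np

def class_forgetting_density(heat_map, label_map, cls):
    """Per-class forgetting event density used to rank heat maps:
    the sum of forgetting events over pixels of class `cls`, divided
    by the number of pixels of that class in the image."""
    mask = (label_map == cls)
    if not mask.any():
        return 0.0  # class absent from this image
    return heat_map[mask].sum() / mask.sum()
```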
Finally, we transfer the visual features of a predefined class from the images with the highest density to randomly sampled training images. Here, our architecture is a slightly altered version of [10]. In short, the model modulates the style characteristics on the batch-normalization outputs within the image generator. This enables class-specific transfers without affecting the geology of the image. In our method, we transfer the underrepresented classes within our dataset, as these classes are generally the most difficult to learn. After generation, the transferred images are added to our training pool and the segmentation model is trained from scratch.
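Putting the pieces together, the overall loop can be sketched as follows. Here `train_segmentation`, `forgetting_heat_maps`, and `style_transfer` are hypothetical stand-ins for the components described above (not the authors' published code), and we assume images expose `.name` and `.labels` attributes:

```python
import random

def support_vector_augmentation(train_images, val_images, target_cls,
                                n_sources=6, n_transfers=64):
    """Sketch of the workflow; helper functions are hypothetical stand-ins."""
    # 1. Train and record forgetting event heat maps on the validation images.
    model = train_segmentation(train_images)
    heat_maps = forgetting_heat_maps(model, val_images)  # one (M, N) map per image

    # 2. Rank validation images by target-class forgetting event density.
    ranked = sorted(val_images, key=lambda im: class_forgetting_density(
        heat_maps[im.name], im.labels, target_cls), reverse=True)

    # 3. Use the densest images as style sources for new training samples.
    synthetic = [style_transfer(source=src, target=random.choice(train_images),
                                cls=target_cls)
                 for src in ranked[:n_sources] for _ in range(n_transfers)]

    # 4. Retrain the segmentation model from scratch on the augmented pool.
    return train_segmentation(train_images + synthetic)
```

The defaults n_sources=6 and n_transfers=64 follow the experimental protocol described in Section 3.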
3. RESULTS AND DISCUSSION

To produce computationally efficient forgetting event heat maps, we train the network for 60 epochs and only track the validation and test set heat maps. In each of our experiments, the validation set is chosen by selecting every fifth vertical slice (referred to as inlines) and horizontal slice (referred to as crosslines) of the training volume. Subsequently, we query six images with the highest forgetting event density for our target class. Each image is used as a style source to generate 64 transfer images. For generation, we sample randomly to obtain the target image and retrain the segmentation model from scratch. In this paper, we only report the results when transferring the orange class (scruff). Other underrepresented classes (e.g. the red class zechstein) rendered similar results and are omitted. In our numerical analysis, the results are averaged over five separate experiments to account for random factors (e.g. initialization). We compare our method to other common augmentation methods (random horizontal flip and random rotations) in terms of segmentation performance (class accuracy) and the forgetting event heat maps. The results are shown in Table 1 and Fig. 5, respectively.

Fig. 4. Entire workflow of our architecture.

Class accuracy
Method          Upper N. S.   Middle N. S.   Lower N. S.   Chalk   Scruff   Zechstein
Baseline          0.982         0.912          0.969       0.816   0.383     0.651
Random Flip       0.983         0.899          0.967       0.820   0.354     0.672
Random Rotate     0.974         0.933          0.974       0.824   0.533     0.681
Ours              0.982         0.906          0.966       0.810   0.438     0.656

Table 1. Averaged class accuracy over five augmentation experiments.

Fig. 5. Heat maps when using different augmentation methods (rows: Baseline, Random Flip, Random Rotation, Ours; columns: Sections 1-6). Our method significantly reduces the amount of forgetting events and impacts the regions' shapes.

Overall, our method reduces the amount of forgetting events significantly more than the other augmentation methods. Specifically, we find that several regions with a high forgetting event density are transferred to a low density or disappear entirely (the bottom class in Section 2, or the entire right part of Section 6). These regions were shifted away from the decision boundary, and model updates had little or no effect on the classification accuracy during training. In contrast, we find that no forgetting event regions disappear with the standard augmentation methods. Instead, the severity of the forgetting event regions is reduced.

Numerically, all methods overwhelmingly match or outperform the baseline with respect to class accuracy. We note that our method only affects the scruff class accuracy and matches the baseline performance on all other classes. This shows the flexibility of our algorithm and allows increased control over the network performance. We further observe that random rotations outperform our technique even in the scruff class. Although the class accuracy is higher, the forgetting event maps show significantly more forgetting event regions than the maps produced by our method. Moreover, the locations and shapes of the prone regions produced by the traditional methods are similar to the baseline regions (e.g. the bottom class of Section 3). In contrast, our method changes the shape and location of the severe forgetting event region, indicating a clear shift in the representation space.

Finally, we also identify regions with a lower forgetting event density that transitioned to a higher density (Section 5, bottom left) by applying our method. This allows us to analyze model weaknesses and interpret the segmentation output in light of training difficulty.
4. CONCLUSION

In this paper, we explain the behaviour of deep models by tracking how often samples are forgotten between model updates. We identify regions that are especially difficult for the model and evaluate how these regions change when different segmentation strategies are pursued. Finally, we engineer a novel method that explicitly exploits this characteristic to actively influence how the data is represented within the model. We show that our method increases the margin of difficult regions, indicating a clear decision boundary shift.
5. REFERENCES
[1] Y. Alaudah, P. Michałowicz, M. Alfarraj, and G. AlRegib, “A machine-learning benchmark for facies classification,” Interpretation, vol. 7, no. 3, pp. SE175–SE187, 2019.

[2] J. Kirkpatrick, R. Pascanu, N. Rabinowitz, J. Veness, G. Desjardins, A. A. Rusu, K. Milan, J. Quan, T. Ramalho, A. Grabska-Barwinska, et al., “Overcoming catastrophic forgetting in neural networks,” Proceedings of the National Academy of Sciences, vol. 114, no. 13, pp. 3521–3526, 2017.

[3] H. Ritter, A. Botev, and D. Barber, “Online structured Laplace approximations for overcoming catastrophic forgetting,” in Advances in Neural Information Processing Systems, pp. 3738–3748, 2018.

[4] M. Toneva, A. Sordoni, R. T. d. Combes, A. Trischler, Y. Bengio, and G. J. Gordon, “An empirical study of example forgetting during deep neural network learning,” arXiv preprint arXiv:1812.05159, 2018.

[5] A. Kendall and Y. Gal, “What uncertainties do we need in Bayesian deep learning for computer vision?,” in Advances in Neural Information Processing Systems, pp. 5574–5584, 2017.

[6] R. R. Selvaraju, M. Cogswell, A. Das, R. Vedantam, D. Parikh, and D. Batra, “Grad-CAM: Visual explanations from deep networks via gradient-based localization,” in Proceedings of the IEEE International Conference on Computer Vision, pp. 618–626, 2017.

[7] M. Prabhushankar, G. Kwon, D. Temel, and G. AlRegib, “Contrastive explanations in neural networks,” in 2020 IEEE International Conference on Image Processing (ICIP), pp. 3289–3293, IEEE, 2020.

[8] J. Lee and G. AlRegib, “Gradients as a measure of uncertainty in neural networks,” in 2020 IEEE International Conference on Image Processing (ICIP), pp. 2416–2420, IEEE, 2020.

[9] G. B. Orr and K.-R. Müller, Neural Networks: Tricks of the Trade. Springer, 2003.

[10] P. Zhu, R. Abdal, Y. Qin, and P. Wonka, “SEAN: Image synthesis with semantic region-adaptive normalization,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5104–5113, 2020.

[11] T. Park, M.-Y. Liu, T.-C. Wang, and J.-Y. Zhu, “Semantic image synthesis with spatially-adaptive normalization,” in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2337–2346, 2019.

[12] L.-C. Chen, G. Papandreou, I. Kokkinos, K. Murphy, and A. L. Yuille, “DeepLab: Semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected CRFs,” IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 40, no. 4, pp. 834–848, 2017.

[13] K. He, X. Zhang, S. Ren, and J. Sun, “Deep residual learning for image recognition,” in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770–778, 2016.
BtE2T4oBgHgl3EQf8wk1/content/tmp_files/load_file.txt
ADDED
@@ -0,0 +1,304 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtE2T4oBgHgl3EQf8wk1/content/2301.04221v1.pdf,len=303
|
2 |
+
page_content='Citation R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtE2T4oBgHgl3EQf8wk1/content/2301.04221v1.pdf'}
|
3 |
+
page_content=' Benkert, O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtE2T4oBgHgl3EQf8wk1/content/2301.04221v1.pdf'}
|
4 |
+
page_content='J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtE2T4oBgHgl3EQf8wk1/content/2301.04221v1.pdf'}
|
5 |
+
page_content=' Aribido, and G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtE2T4oBgHgl3EQf8wk1/content/2301.04221v1.pdf'}
|
6 |
+
page_content=' AlRegib, “Explaining Deep Models Through Forgettable Learning Dynamics,” in IEEE International Conference on Image Processing (ICIP), Anchorage, AK, Sep.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtE2T4oBgHgl3EQf8wk1/content/2301.04221v1.pdf'}
|
7 |
+
page_content=' 19-22 2021 Review Date of acceptance: June 2021 Bib @ARTICLE{benkert2021 ICIP, author={R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtE2T4oBgHgl3EQf8wk1/content/2301.04221v1.pdf'}
|
8 |
+
page_content=' Benkert, O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtE2T4oBgHgl3EQf8wk1/content/2301.04221v1.pdf'}
|
9 |
+
page_content='J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtE2T4oBgHgl3EQf8wk1/content/2301.04221v1.pdf'}
|
10 |
+
page_content=' Aribido, and G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtE2T4oBgHgl3EQf8wk1/content/2301.04221v1.pdf'}
|
11 |
+
page_content=' AlRegib}, journal={IEEE International Conference on Image Processing}, title={Explaining Deep Models Through Forgettable Learning Dynamics}, year={2021} Copyright ©2022 IEEE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtE2T4oBgHgl3EQf8wk1/content/2301.04221v1.pdf'}
|
12 |
+
page_content=' Personal use of this material is permitted.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtE2T4oBgHgl3EQf8wk1/content/2301.04221v1.pdf'}
|
13 |
+
page_content=' Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtE2T4oBgHgl3EQf8wk1/content/2301.04221v1.pdf'}
|
14 |
+
page_content=' Contact rbenkert3@gatech.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtE2T4oBgHgl3EQf8wk1/content/2301.04221v1.pdf'}
|
15 |
+
page_content='edu OR alregib@gatech.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtE2T4oBgHgl3EQf8wk1/content/2301.04221v1.pdf'}
|
16 |
+
page_content='edu http://ghassanalregib.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtE2T4oBgHgl3EQf8wk1/content/2301.04221v1.pdf'}
|
17 |
+
page_content='info/ arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtE2T4oBgHgl3EQf8wk1/content/2301.04221v1.pdf'}
|
18 |
+
page_content='04221v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtE2T4oBgHgl3EQf8wk1/content/2301.04221v1.pdf'}
|
19 |
+
page_content='CV] 10 Jan 2023 EXPLAINING DEEP MODELS THROUGH FORGETTABLE LEARNING DYNAMICS Ryan Benkert, Oluwaseun Joseph Aribido and Ghassan AlRegib School of Electrical and Computer Engineering Georgia Institute of Technology, Atlanta, GA, 30332-0250, USA {rbenkert3, oja, alregib}@gatech.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtE2T4oBgHgl3EQf8wk1/content/2301.04221v1.pdf'}
|
20 |
+
ABSTRACT

Even though deep neural networks have shown tremendous success in countless applications, explaining model behaviour or predictions is an open research problem. In this paper, we address this issue with a simple yet effective method: analysing the learning dynamics of deep neural networks in semantic segmentation tasks. Specifically, we visualize the learning behaviour during training by tracking how often samples are learned and forgotten in subsequent training epochs. This further allows us to derive important information about the proximity to the class decision boundary and to identify regions that pose a particular challenge to the model. Inspired by this phenomenon, we present a novel segmentation method that actively uses this information to alter the data representation within the model by increasing the variety of difficult regions. Finally, we show that our method consistently reduces the number of regions that are forgotten frequently. We further evaluate our method in light of the segmentation performance.
Index Terms— Example Forgetting, Interpretability, Support Vectors, Semantic Segmentation

1. INTRODUCTION
Over the last decade, deep learning has had an impact on nearly every sector. It has paved the way for scientific breakthroughs in areas ranging from image recognition to complex medical diagnostics. The success of deep neural models lies in their ability to learn complex non-linear functions and estimate distributions of high-dimensional data. In addition, open-source deep learning libraries enable fast large-scale deployment, making state-of-the-art algorithms available for countless applications. A central component of neural networks is how well they are capable of representing the target data. Well-designed models can capture unique representations of the data and "learn" a function with a small error margin. In contrast, poor representations are often inconsistent and can produce semantically incorrect predictions. Therefore, understanding how the model represents and interacts with the data remains a very challenging and highly relevant research problem. One application where this behaviour is especially important is deep learning for computational seismic interpretation. In seismic, there is limited open-source annotated data due to the high cost associated with data acquisition and expert annotation. For this reason, architectures designed for large computer vision applications overfit on limited annotated seismic data and exhibit poor generalization capabilities. Due to the high relevance in this field, we present our method on the F3 block dataset ([1]), where several classes are underrepresented. Nevertheless, the work is applicable to a wide range of 2D data.
In this paper, we view neural networks in the context of their learning dynamics. Specifically, neural networks do not learn continually but forget samples over time. One branch of research investigates the forgotten information when a model is trained on one task but fine-tuned on another; in the literature, this is often referred to as catastrophic forgetting ([2, 3]). In contrast, [4] view the dynamics within a single data distribution and track the frequency with which information is forgotten during training. In this paper, we build upon this intuition and visualize frequently forgotten regions in a generalized segmentation framework. Similar to uncertainty works with Bayesian inference ([5]) or gradient-based explanations ([6, 7, 8]), we can identify difficult regions and explain segmentation predictions. In contrast to other explainability techniques, frequently forgotten regions contain valuable information about the position within the representation space. Specifically, frequently forgotten regions are closer to the decision boundary and pose a threat to the generalization performance. Based on these findings, we engineer a method that identifies challenging pixels and generates new samples that actively influence the representation mapping. In Fig. 1 we show a toy example of our method: based on the identified support vectors (circled blue disks), we generate new samples (green) that actively shift the decision boundary (black line) to reduce the number of support vectors for a specific class. In contrast to traditional data augmentation ([9]), our method is data-driven and consistently reduces support vectors within the model. Our contributions are as follows: first, we visualize difficult regions in the data by analyzing the learning dynamics during training; second, we develop an augmentation method that reduces prone regions by actively shifting the decision boundary; lastly, we compare our technique to popular augmentation techniques in the literature.

Fig. 1. Intuition of our support vector augmentation method.
2. METHODOLOGY

Our goal is to quantify the learning behaviour during image segmentation by analysing the frequency of sample forgetting. In this section, we formally define when a sample is forgotten and how this relates to the proximity of samples to the decision boundary. Furthermore, we exploit these dynamics to actively shift the decision boundary in our model. Specifically, we identify support vectors in our training images and increase their variety through style transfer.
2.1. Forgetting Events

Intuitively, a sample is forgotten if it was classified correctly in a previous epoch and misclassified in the current epoch. More formally, for an image I with (pixel, annotation) tuples (x_i, y_i), we define the accuracy of each pixel at epoch t as

    acc_i^t = \mathbb{1}_{\tilde{y}_i^t = y_i}.    (1)

Here, \mathbb{1}_{\tilde{y}_i^t = y_i} is a binary variable indicating the correctness of the classified pixel in image I. With this definition, we say a pixel was forgotten at epoch t+1 if the accuracy at t+1 is strictly smaller than the accuracy at epoch t:

    f_i^t = \mathrm{int}(acc_i^{t+1} < acc_i^t) \in \{0, 1\}.    (2)

Following [4], we define the binary event f_i^t as a forgetting event at epoch t. Since our application is a segmentation setting, we further visualize forgetting events in the spatial domain. Specifically, we count the number of forgetting events occurring at each pixel i and display them in a heat map. Mathematically, the heat map L \in \mathbb{N}_0^{M \times N} is the sum over all forgetting events f_i^t that occurred in the time frame T:

    L_i = \sum_{t=0}^{T} f_i^t.    (3)
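To make the bookkeeping behind Eqs. (1)-(3) concrete, the following is a minimal NumPy sketch (our own illustration, not the authors' released code; the array names and shapes are assumptions):

import numpy as np

def update_forgetting_heatmap(prev_correct, curr_correct, heatmap):
    """Accumulate per-pixel forgetting events (Eqs. 1-3).

    prev_correct, curr_correct: boolean (M, N) maps of per-pixel
    correctness 1[y_pred == y_true] at epochs t and t+1 (Eq. 1).
    heatmap: integer (M, N) array holding the running count L_i (Eq. 3).
    """
    # Eq. 2: a pixel is forgotten when it was correct at epoch t but
    # wrong at epoch t+1, i.e. acc^{t+1} < acc^t for binary accuracies.
    forgotten = prev_correct & ~curr_correct
    heatmap += forgotten.astype(heatmap.dtype)
    return heatmap

# Toy usage: evaluate the model after every epoch and compare against
# the previous epoch's correctness map (random stand-ins here).
M, N, epochs = 4, 4, 3
rng = np.random.default_rng(0)
heatmap = np.zeros((M, N), dtype=np.int64)
prev = rng.random((M, N)) < 0.5
for t in range(epochs):
    curr = rng.random((M, N)) < 0.5
    heatmap = update_forgetting_heatmap(prev, curr, heatmap)
    prev = curr
print(heatmap)  # L_i: forgetting events per pixel over the run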
Fig. 2. An example of a forgetting event heat map as well as its corresponding image and annotation. Pixels close to the decision boundary are highlighted in different shades of red, whereas pixels deep within the class manifold are dark blue. Note that several classes (e.g., the orange class "scruff") are underrepresented.

For better illustration, we present an example of a heat map in Fig. 2.
Areas that were forgotten frequently are highlighted in shades of red, in contrast to pixels that were forgotten rarely (blue). Similar to [4], we can broadly classify the pixels into two groups. The first group consists of the pixels that were never forgotten or forgotten only rarely (e.g., the light blue class in the center of Fig. 2). Since every epoch represents a model update, we conclude that these pixels are never or only rarely shifted outside of the class manifold in the feature space. In contrast, the second group consists of pixels forgotten more frequently (e.g., the class boundaries in Fig. 2). Specifically, this means that several model updates shifted these pixels over the decision boundary during training, mapping them closer to the decision boundary than unforgettable pixels. Similar to [4], we argue that these pixels play a role similar to support vectors in maximal margin classifiers. In particular, we will show the importance of forgetting events in analyzing model predictions.
2.2. Support Vector Augmentation

As we have seen in Section 2.1, forgetting events are a useful metric to quantify the sample position in the representation space. To be precise, forgetting events provide information about the proximity of samples to the decision boundary in a training interval T. In this section, we exploit this information to increase the variety of forgettable pixels. In the seismic application, we achieve this through style transfer models ([10, 11]). Specifically, we transfer class-specific visual features from a source image to a target image without changing the structure or characteristics of neighboring classes. We target specific classes with a high forgetting event density and transfer their characteristics to other sections without affecting the geologic properties of the seismic images.
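As a rough illustration of the class-conditional restyling idea, the following toy is written in the spirit of the region-adaptive normalization of [10, 11], not the actual SEAN implementation; the per-class style statistics gamma and beta are assumed to be extracted from the style source:

import numpy as np

def region_adaptive_modulation(feat, mask, gamma, beta, eps=1e-5):
    """Re-style only the target-class region of a generator feature map.

    feat:  (C, H, W) feature map inside the image generator.
    mask:  (H, W) boolean map of the target class (e.g. "scruff").
    gamma, beta: (C,) style statistics for that class from the source.
    Pixels outside the mask are left untouched, so the structure and
    characteristics of neighboring classes are preserved.
    """
    mu = feat.mean(axis=(1, 2), keepdims=True)
    sigma = feat.std(axis=(1, 2), keepdims=True)
    normalized = (feat - mu) / (sigma + eps)
    styled = gamma[:, None, None] * normalized + beta[:, None, None]
    return np.where(mask[None], styled, feat)

# Toy call: restyle a random 8x8 feature map inside a random class mask.
out = region_adaptive_modulation(np.random.rand(3, 8, 8),
                                 np.random.rand(8, 8) > 0.5,
                                 gamma=np.ones(3), beta=np.zeros(3))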
An example of a style transfer is presented in Fig. 3. Here, we show the target for the transfer, the resulting transfer image (second column from the left), the target annotation, and the style source with its corresponding label. The image on the far right of Fig. 3 shows the difference between the transfer images of subsequent batches with different style sources. In this example, we transfer the visual features of class "scruff" (orange) from the style source to the target image. Moreover, switching the source image largely affects the target class (difference image in Fig. 3), which demonstrates the desired functionality of our algorithm.

Fig. 3. Example of a feature transfer within two seismic images.
Our method consists of a segmentation model, a transfer model, and a data selection step (Fig. 4). First, our method trains the segmentation model on the training data and produces a forgetting event heat map for every validation image in the training volume. In principle, heat maps could be produced for the entire training set, but this is computationally inefficient. In our implementation, the segmentation architecture is based on the deeplab-v3 architecture by [12] with a resnet-18 ([13]) backbone; our choice is based on empirical evaluations of performance and computational efficiency. In the next step of our workflow, we calculate the forgetting event density within each class of a heat map. Specifically, we sum all forgetting events f_{i \in c_k} within class c_k of a heat map and divide by the number of pixels of class c_k in the image. This metric allows us to rank each heat map according to its density with regard to an arbitrary class in the dataset. Finally, we transfer the visual features of a predefined class from the images with the highest density to randomly sampled training images. Here, our architecture is a slightly altered version of [10]. In short, the model modulates the style characteristics on the batch-normalization outputs within the image generator. This enables class-specific transfers without affecting the geology of the image. In our method, we transfer the underrepresented classes within our dataset, as these classes are generally the most difficult to learn. After generation, the transferred images are added to our training pool and the segmentation model is trained from scratch.
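The density computation and ranking step can be sketched as follows (a minimal NumPy illustration under assumed array conventions; the helper names in the commented usage are hypothetical, and the real pipeline wraps a deeplab-v3 segmentation model and a SEAN-style transfer model):

import numpy as np

def class_density(heatmap, label, cls):
    """Forgetting event density of class `cls` in one heat map:
    forgetting events summed over the class pixels, divided by the
    number of pixels belonging to that class in the image."""
    mask = (label == cls)
    n_pixels = mask.sum()
    if n_pixels == 0:
        return 0.0  # class absent from this image
    return heatmap[mask].sum() / n_pixels

def rank_style_sources(heatmaps, labels, cls, k=6):
    """Indices of the k images with the highest forgetting event
    density for the target class (the experiments below use k=6)."""
    densities = [class_density(h, l, cls) for h, l in zip(heatmaps, labels)]
    return np.argsort(densities)[::-1][:k]

# Hypothetical usage: heatmaps/labels are lists of (M, N) arrays
# collected during training; class id 4 stands in for "scruff".
# for src in rank_style_sources(heatmaps, labels, cls=4, k=6):
#     targets = sample_random_training_images(64)   # hypothetical helper
#     training_pool += style_transfer(src, targets)  # SEAN-style model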
3. RESULTS AND DISCUSSION

Fig. 4. Entire workflow of our architecture.

To produce computationally efficient forgetting event heat maps, we train the network for 60 epochs and only track the validation and test set heat maps. In each of our experiments, the validation set is chosen by selecting every fifth vertical slice (referred to as inlines) and every fifth horizontal slice (referred to as crosslines) of the training volume.
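A minimal sketch of that split (our own illustration; the volume sizes and axis order are assumptions, not taken from the paper):

import numpy as np

n_inlines, n_crosslines = 401, 701  # assumed training-volume extents

val_inlines = np.arange(0, n_inlines, 5)        # every fifth inline
val_crosslines = np.arange(0, n_crosslines, 5)  # every fifth crossline
train_inlines = np.setdiff1d(np.arange(n_inlines), val_inlines)
train_crosslines = np.setdiff1d(np.arange(n_crosslines), val_crosslines)
print(len(val_inlines), len(train_inlines))     # 81 320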
Subsequently, we query the six images with the highest forgetting event density for our target class. Each image is used as a style source to generate 64 transfer images. For generation, we sample randomly to obtain the target images and retrain the segmentation model from scratch. In this paper, we only report the results when transferring the orange class (scruff); other underrepresented classes (e.g., the red class zechstein) rendered similar results and are omitted. In our numerical analysis, our results are averaged over five separate experiments to account for random factors (e.g., initialization). We compare our method to other common augmentation methods (random horizontal flip and random rotations) in terms of segmentation performance (class accuracy) and the forgetting event heat maps. The results are shown in Table 1 and Fig. 5, respectively. Overall, our method reduces the number of forgetting events significantly more than the other augmentation methods. Specifically, we find that several regions with a high forgetting event density are transferred to a low density or disappear entirely (the bottom class in Section 2 or the entire right part of Section 6). These regions were shifted away from the decision boundary, and model updates had little or no effect on the classification accuracy during training.
Method          Upper N. S.   Middle N. S.   Lower N. S.   Chalk   Scruff   Zechstein
Baseline        0.982         0.912          0.969         0.816   0.383    0.651
Random Flip     0.983         0.899          0.967         0.820   0.354    0.672
Random Rotate   0.974         0.933          0.974         0.824   0.533    0.681
Ours            0.982         0.906          0.966         0.810   0.438    0.656

Table 1. Averaged class accuracy over five augmentation experiments.

Fig. 5. Heat maps when using different augmentation methods. Our method significantly reduces the number of forgetting events and changes the shapes of the prone regions.
In contrast, we find that no forgetting event regions disappear under the standard augmentation methods; instead, the severity of the forgetting event regions is reduced. Numerically, all methods overwhelmingly match or outperform the baseline with respect to class accuracy. We note that our method only affects the scruff class accuracy and matches the baseline performance on all other classes. This shows the flexibility of our algorithm and allows increased control over the network performance. We further observe that random rotations outperform our technique even in the scruff class. Although the class accuracy is higher, the forgetting event maps show significantly more forgetting event regions than the maps produced by our method. Moreover, the locations and shapes of the prone regions produced by the traditional methods are similar to the baseline regions (e.g., the bottom class of Section 3). In contrast, our method changes the shape and location of the severe forgetting event region, indicating a clear shift in the representation space. Finally, we also identify regions with a lower forgetting event density that transitioned to a higher density (Section 5, bottom left) when applying our method. This allows us to analyze model weaknesses and interpret the segmentation output in light of training difficulty.
4. CONCLUSION

In this paper, we explain the behaviour of deep models by tracking how often samples are forgotten between model updates. We identify regions that are especially difficult for the model and evaluate how these regions change when different segmentation strategies are pursued. Finally, we engineer a novel method that explicitly exploits this characteristic to actively influence how the data is represented within the model. We show that our method increases the margin of difficult regions, indicating a clear decision boundary shift.
[Fig. 5 panel residue: columns Section 1 to Section 6; rows Baseline, Random Flip, Random Rotation, Ours; colorbar ticks roughly 10 to 20 forgetting events.]

5. REFERENCES
[1] Y. Alaudah, P. Michałowicz, M. Alfarraj, and G. AlRegib, "A machine-learning benchmark for facies classification," Interpretation, vol. 7, no. 3, pp. SE175–SE187, 2019.

[2] J. Kirkpatrick, R. Pascanu, N. Rabinowitz, J. Veness, G. Desjardins, A. A. Rusu, K. Milan, J. Quan, T. Ramalho, A. Grabska-Barwinska, et al., "Overcoming catastrophic forgetting in neural networks," Proceedings of the National Academy of Sciences, vol. 114, no. 13, pp. 3521–3526, 2017.

[3] H. Ritter, A. Botev, and D. Barber, "Online structured Laplace approximations for overcoming catastrophic forgetting," in Advances in Neural Information Processing Systems, pp. 3738–3748, 2018.

[4] M. Toneva, A. Sordoni, R. T. d. Combes, A. Trischler, Y. Bengio, and G. J. Gordon, "An empirical study of example forgetting during deep neural network learning," arXiv preprint arXiv:1812.05159, 2018.

[5] A. Kendall and Y. Gal, "What uncertainties do we need in Bayesian deep learning for computer vision?," in Advances in Neural Information Processing Systems, pp. 5574–5584, 2017.

[6] R. R. Selvaraju, M. Cogswell, A. Das, R. Vedantam, D. Parikh, and D. Batra, "Grad-CAM: Visual explanations from deep networks via gradient-based localization," in Proceedings of the IEEE International Conference on Computer Vision, pp. 618–626, 2017.

[7] M. Prabhushankar, G. Kwon, D. Temel, and G. AlRegib, "Contrastive explanations in neural networks," in 2020 IEEE International Conference on Image Processing (ICIP), pp. 3289–3293, IEEE, 2020.

[8] J. Lee and G. AlRegib, "Gradients as a measure of uncertainty in neural networks," in 2020 IEEE International Conference on Image Processing (ICIP), pp. 2416–2420, IEEE, 2020.

[9] G. B. Orr and K.-R. Müller, Neural Networks: Tricks of the Trade. Springer, 2003.

[10] P. Zhu, R. Abdal, Y. Qin, and P. Wonka, "SEAN: Image synthesis with semantic region-adaptive normalization," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5104–5113, 2020.

[11] T. Park, M.-Y. Liu, T.-C. Wang, and J.-Y. Zhu, "Semantic image synthesis with spatially-adaptive normalization," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2337–2346, 2019.

[12] L.-C. Chen, G. Papandreou, I. Kokkinos, K. Murphy, and A. L. Yuille, "DeepLab: Semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected CRFs," IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 40, no. 4, pp. 834–848, 2017.

[13] K. He, X. Zhang, S. Ren, and J. Sun, "Deep residual learning for image recognition," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770–778, 2016.
CdE5T4oBgHgl3EQfTw8s/content/2301.05538v1.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:174c8cd38b6b557543eca211994ab9272056b2299ab26ababc8b9c3ae1c24c47
size 10452790
EtE4T4oBgHgl3EQf6w5f/content/tmp_files/2301.05334v1.pdf.txt
ADDED
@@ -0,0 +1,1685 @@
TransfQMix: Transformers for Leveraging the Graph Structure of Multi-Agent Reinforcement Learning Problems

Matteo Gallici (KEMLG Research Group, Universitat Politècnica de Catalunya, Barcelona, Spain), gallici@cs.upc.edu
Mario Martin (KEMLG Research Group, Universitat Politècnica de Catalunya, Barcelona, Spain), mmartin@cs.upc.edu
Ivan Masmitja (Institut de Ciències del Mar (ICM), CSIC, Barcelona, Spain), masmitja@icm.csic.es
ABSTRACT

Coordination is one of the most difficult aspects of multi-agent reinforcement learning (MARL). One reason is that agents normally choose their actions independently of one another. In order to see coordination strategies emerge from the combination of independent policies, recent research has focused on the use of a centralized function (CF) that learns each agent's contribution to the team reward. However, the structure in which the environment is presented to the agents and to the CF is typically overlooked. We have observed that the features used to describe the coordination problem can be represented as vertex features of a latent graph structure. Here, we present TransfQMix, a new approach that uses transformers to leverage this latent structure and learn better coordination policies. Our transformer agents perform a graph reasoning over the state of the observable entities. Our transformer Q-mixer learns a monotonic mixing function from a larger graph that includes the internal and external states of the agents. TransfQMix is designed to be entirely transferable, meaning that the same parameters can be used to control and train larger or smaller teams of agents. This enables the deployment of promising approaches for saving training time and deriving general policies in MARL, such as transfer learning, zero-shot transfer, and curriculum learning. We report TransfQMix's performance in the Spread and StarCraft II environments. In both settings, it outperforms state-of-the-art Q-Learning models, and it demonstrates effectiveness in solving problems that other methods cannot solve.
KEYWORDS

Multi-Agent Reinforcement Learning, Transformers, Coordination Graphs, Transfer Learning

ACM Reference Format:
Matteo Gallici, Mario Martin, and Ivan Masmitja. 2023. TransfQMix: Transformers for Leveraging the Graph Structure of Multi-Agent Reinforcement Learning Problems. In PREPRINT VERSION, accepted at: Proc. of the 22nd International Conference on Autonomous Agents and Multiagent Systems (AAMAS 2023), London, United Kingdom, May 29 – June 2, 2023, IFAAMAS, 9 pages.
PREPRINT VERSION, accepted at: Proc. of the 22nd International Conference on Autonomous Agents and Multiagent Systems (AAMAS 2023), A. Ricci, W. Yeoh, N. Agmon, B. An (eds.), May 29 – June 2, 2023, London, United Kingdom. © 2023 International Foundation for Autonomous Agents and Multiagent Systems (www.ifaamas.org). All rights reserved.

1 INTRODUCTION

In order to solve cooperative multi-agent problems, it is critical that agents behave in a coordinated manner. Deep reinforcement learning (RL) has been successfully applied to numerous multi-agent optimization tasks [6, 8, 12]. When we try to apply RL to learn coordination policies, however, we face numerous challenges. Due to communication constraints, the deployment of a central controller is not practical. Even when communication is allowed, the large size of the observation and action spaces introduces the curse of dimensionality, discouraging the use of a single actuator. Agents should therefore choose their actions independently of one another. In order to see coordinated strategies emerge from the combination of independent policies, state-of-the-art multi-agent reinforcement learning (MARL) models use one or more centralized functions (CFs) to learn the contribution of the agents' actions to the team goal. The CFs make it possible to optimize the agents' parameters with respect to a global team reward. Once trained, the agents can still be deployed autonomously, since each agent is in charge of choosing its own behavior. This approach is referred to as the centralized-training-decentralized-execution (CTDE) paradigm [4, 7].

During the last years, most works have focused on the CFs of CTDE. Methods such as Value Decomposition Networks (VDN) [21], QMix [18], and QTran [20] extended the traditional Q-Learning algorithm [28] with a central network that (learns to) project the agents' action-values over the q-value of the joint action. Actor-critic models such as Multi-Agent Deep Deterministic Policy Gradient (MADDPG) [11] and Multi-Agent Proximal Policy Optimization (MAPPO) [30] allow the critic networks to access global observations during training. More recent approaches, like Deep Implicit Coordination Graphs (DICG) [9] and QPlex [26], refined the CF with the use of multi-head self-attention and graph neural networks. Nonetheless, individual agents are usually kept simple by employing recurrent neural networks (RNN) fed by observation vectors that are large concatenations of various types of features (see Figure 1a). These concatenations lose a key piece of information: the fact that many of the features are exactly of the same type despite referring to separate entities (e.g., the position in a map).

Our work shows that the structure of the observation space, as well as the architecture used to deploy the agents and the CFs, plays an important role in solving complex coordination tasks. We suggest that observation vectors contain mostly vertex features of a latent graph structure that becomes explicit when we reconsider how they are fed into neural networks. Consequently, instead of chaining together many features to generate a vector that describes the state of the world observed by the agent, we generalize a set of features and use them to describe the state of the entities observed by the agent (or the CFs). Our approach is depicted in Figure 1b. We do not include any additional information in this process. On the contrary, sometimes we need to remove data that is not accessible for all the observed entities.

[Figure 1: A traditional observation vector and our graph approach. Panel (a), "Traditional approach": a concatenated observation vector fed to an MLP and an RNN. Panel (b), "Our graph approach": per-entity features fed to an embedder and a self-attention module. In traditional approaches (a), the observation vector for the agent a at the time step t is defined by a concatenation of features relative to itself, to the other k - 1 entities, and to additional elements (e.g., previous actions). In our approach (b), we keep only the z features defined for all the entities to generate the vertices of a coordination graph, the edges of which are learned via a self-attention mechanism.]

There are several advantages to this approach: (i) we can employ the same weights of an embedding feed-forward network to process the same vertex features, reducing the complexity of the feature space; and (ii) we can learn the edges of the latent coordination graph using a self-attention mechanism. In particular, we employ transformers [24], which have been shown to be an effective graph-based architecture in natural language processing [27], computer vision [29], and even for developing a generalist agent [19].

Our transformer agents sample their actions after processing the graph of entities observed at a specific time step. Our transformer Q-mixer learns a monotonic mixing function from a larger graph that contains the agents' internal and external states. Given the strong temporal dependencies in RL problems, we add a recurrent mechanism in both the agents and the mixer, which allows us to condition the graph reasoning at a certain time step on an embedding of the preceding one. The resulting model, TransfQMix, has the advantage of being totally transferable, meaning that the same parameters can be applied to control and train larger or smaller teams. This is possible because the networks' weights constitute an attention mechanism that is independent of the number of vertices to which it is applied. Traditional models, conversely, must be re-trained every time we introduce a new entity, because the dimension of the concatenated vectors changes, and therefore the network weights must be readjusted. The total transferability of TransfQMix enables the deployment of transfer learning, zero-shot transfer, and curriculum learning, which are crucial steps towards more general models in MARL.

We tested TransfQMix in multiple scenarios of the Spread task [11] and in the hardest maps of StarCraft II (SC2) [25]. TransfQMix outperformed state-of-the-art Q-Learning models in both environments, and it could solve problems that the others cannot address, showing in general faster convergence to better coordination policies.

The following is a list of the contributions of this paper:
(1) We formalize a new paradigm for cooperative MARL, which consists of rethinking coordination tasks as graph embedding tasks.
(2) We present a new method, TransfQMix, that uses transformers to leverage coordination graphs and outperforms state-of-the-art Q-Learning methods.
(3) We introduce a graph-based recurrent mechanism for including a time dependency in both the transformer agents and the mixer.
(4) We design TransfQMix to be able to process graphs of entities of varying sizes. This allows us to obtain a more general method which can be used to deploy zero-shot transfer, transfer learning, and curriculum learning in MARL.
2 RELATED WORK

Recent state-of-the-art methods tackle MARL problems using the CTDE paradigm [4, 7]. The CTDE approach has been deployed successfully with policy-based and value-based methods [9, 30, 31]. Here, we focus on value-based methods that use CTDE.

A necessary condition for implementing CTDE effectively in multi-agent Q-Learning is that a greedy sampling of the joint action is equivalent to sampling the actions greedily from the individual agents [25]. This principle is known as the individual-global-max (IGM) [20]. VDN was one of the first methods to extend Q-Learning to MARL using CTDE [21]. It implements a non-parameterized CF which computes the Q_tot of the joint action as the sum of the individual agents' action-values. Despite respecting IGM, this CF is too simple to effectively model the agents' contribution to Q_tot [18].

QMix [18] demonstrated that in order to satisfy IGM, it is sufficient that the CF is monotonic with regard to the individual action-values. As a result, the VDN sum-function is substituted with a multi-layer perceptron (MLP). This mixer network can learn sophisticated non-linear projections of several action-values over Q_tot. Its weights are generated by a set of hypernetworks conditioned on the state s and are forced to be positive by an absolute activation function. Our proposed method is a refined version of QMix. In particular, TransfQMix also learns a monotonic CF conditioned on s that serves to produce Q_tot from the individual action-values. Nonetheless, TransfQMix is a much more sophisticated method thanks to its use of transformers.

Previous methods have attempted to improve QMix. OWQMix and CWQMix [17] used a weighting mechanism for learning non-monotonic CFs, giving more importance to better joint actions. QTran [20] learned a factorization of Q_tot that was also free of monotonicity, but it did this via several MLPs. QPlex [26] proposed a dueling structure to learn non-monotonic CFs while adhering to the IGM principle. Notice that QPlex, like TransfQMix, employed multi-head attention, but only for a subset of its centralized dueling network. All of these approaches involved RNN agents and large concatenated observation vectors. Despite showing significant advantages in simple theoretical frameworks, it is still debated whether relaxing the monotonicity constraint benefits the modeling of complex problems [26]. Our refinement of QMix focuses on the representation of cooperative games and the network architecture rather than on monotonicity.

Transformers were successfully deployed in single-agent RL [16], but required architecture modifications. Such adjustments are unnecessary for multi-agent problems, since they can be represented more naturally as graph problems. DeepMind's generalist agent (Gato) [19] is a standard transformer that can solve a variety of RL tasks, but it has not been tested in multi-agent settings. Furthermore, Gato is not trained using RL, but rather through a supervised approach. In a method known as universal policy decomposition transformer (UPDET) [5], transformers were applied to a subset of the SC2 tasks. UPDET adopted the QMix framework but replaced RNN agents with transformers, and used a policy decoupling system in which the q-values of entity-based actions (in particular, the q-value of attacking a specific enemy in SC2) were generated from the transformer embedding of that entity. The model performed well in the SC2 subset, but it was not stated how it could be applied to other MARL problems. Moreover, the authors demonstrated that, in the absence of the decoupling approach, QMix performed better when utilizing RNNs rather than transformers. Because policy decoupling is not applicable in many scenarios, UPDET appears to be effective only for very specific problems.

Our method formalizes a generic framework that shows clear benefits of using transformers also when policy decoupling is not applicable. TransfQMix employs a transformer also in the central mixer, whereas UPDET deploys the same MLPs as QMix. This makes TransfQMix a totally transferable method. In contrast, UPDET is only partially transferable, because the mixer network must be re-trained every time the agents are applied to a new task. TransfQMix uses a recurrent graph approach similar to the one introduced by UPDET. However, TransfQMix makes better use of the hidden state by sampling the non-decoupled actions directly from it. Moreover, TransfQMix employs this recurrent mechanism in the mixer network as well. To our knowledge, this is the first method that includes temporal conditioning in a CF.

Zero-shot transfer, transfer learning, and curriculum learning were explored in MARL by [1] using an entity-based graph method similar to ours. That technique, however, was limited to communication problems, whereas TransfQMix aims to be a general MARL method.
3 BACKGROUND

Cooperative multi-agent tasks are formalized as decentralised partially observable Markov decision processes (Dec-POMDP) [13]. A tuple $G = \langle S, U, P, r, Z, O, H, n, \gamma \rangle$ describes the agents $a \in A \equiv \{1, \dots, n\}$, which at every time step choose an action $u^a \in U$ from their hidden state $h^a \in H$, forming a joint action $\mathbf{u} \in \mathbf{U} \equiv U^n$. This causes a transition of the environment according to the state transition function $P(s' \mid s, \mathbf{u}) : S \times \mathbf{U} \times S \to [0, 1]$, where $s \in S$ is the true state of the environment. All agents share the same reward function $r(s, \mathbf{u}) : S \times \mathbf{U} \to \mathbb{R}$, and $\gamma \in [0, 1)$ is a discount factor. The agents have access only to partial observations of the environment, $z \in Z$, according to the observation function $O(s, a) : S \times A \to Z$. Each agent has an action-observation history $\tau^a \in T \equiv (Z \times U)^*$, on which it conditions a stochastic policy $\pi^a(u^a \mid \tau^a) : T \times U \to [0, 1]$. The joint policy $\pi$ has a joint action-value function $Q^{\pi}(s_t, \mathbf{u}_t) = \mathbb{E}_{s_{t+1:\infty}, \mathbf{u}_{t+1:\infty}}[R_t \mid s_t, \mathbf{u}_t]$, where $R_t = \sum_{i=0}^{\infty} \gamma^i r_{t+i}$ is the discounted return.

In order to find the optimal joint action-value function $Q^*(s, \mathbf{u}) = r(s, \mathbf{u}) + \gamma \mathbb{E}_{s'}[\max_{\mathbf{u}'} Q^*(s', \mathbf{u}')]$, we use Q-Learning [28] with a deep neural network parameterized by $\theta$ [23] to minimize the expected TD error [26]:

$$\mathcal{L}(\theta) = \mathbb{E}_{(\boldsymbol{\tau}, \mathbf{u}, r, \boldsymbol{\tau}') \in D}\left[ \left( r + \gamma V(\boldsymbol{\tau}'; \theta^-) - Q(\boldsymbol{\tau}, \mathbf{u}; \theta) \right)^2 \right] \qquad (1)$$

where $V(\boldsymbol{\tau}'; \theta^-) = \max_{\mathbf{u}'} Q(\boldsymbol{\tau}', \mathbf{u}'; \theta^-)$ is the one-step expected future return of the TD target and $\theta^-$ are the parameters of the target network, which are periodically updated with $\theta$. We use a buffer $D$ to store the transition tuples $(\boldsymbol{\tau}, \mathbf{u}, r, \boldsymbol{\tau}')$, where $r$ is the reward for taking action $\mathbf{u}$ at joint action-observation history $\boldsymbol{\tau}$ with a transition to $\boldsymbol{\tau}'$.
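For concreteness, the following is a minimal sketch of the loss in equation (1), assuming batched tensors of sampled transitions; the tensor names are illustrative, not those of any particular codebase.

import torch

def td_loss(q_taken, q_target_max, rewards, gamma=0.99):
    """Minimal sketch of the TD loss of equation (1).

    q_taken:      Q(tau, u; theta) for the joint actions in the buffer, shape (batch,)
    q_target_max: max_u' Q(tau', u'; theta^-) from the target network, shape (batch,)
    rewards:      team rewards r, shape (batch,)
    """
    targets = rewards + gamma * q_target_max
    # the target network is held fixed, so gradients flow only through q_taken
    return ((targets.detach() - q_taken) ** 2).mean()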
We adopt a monotonic CTDE learning paradigm [4, 7, 18, 21]. Execution is decentralized, meaning that each agent's learnt policy is conditioned only on its own action-observation history $\tau^a$. During training, a central mixer network has access to the global state $s$ of the environment and the hidden states of the agents $H$ for projecting the individual action-values over the $Q_{tot}$ of the joint action, which is used in equation (1) to train the model end to end. The monotonic constraint imposed on the CF is the same one formalized by QMix:

$$\frac{\partial Q_{tot}}{\partial Q_a} \geq 0, \quad \forall a \in A \qquad (2)$$

which ensures that the IGM principle is respected.
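The constraint in equation (2) can also be verified numerically for any candidate mixer. A minimal sketch, assuming the mixer is an arbitrary differentiable callable mapping the individual action-values and the state to Q_tot:

import torch

def check_igm_monotonicity(mixer, agent_qs, state):
    """Numerically check dQ_tot/dQ_a >= 0 for every agent (equation (2)).

    mixer:    any differentiable callable (agent_qs, state) -> Q_tot
    agent_qs: individual action-values, shape (1, n_agents)
    """
    agent_qs = agent_qs.clone().requires_grad_(True)
    q_tot = mixer(agent_qs, state)
    grads, = torch.autograd.grad(q_tot.sum(), agent_qs)
    return bool((grads >= 0).all())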
The neural networks in our method are transformers [24], which make extensive use of the attention mechanism [2]. Specifically, we use transformers to manipulate our graphs via multi-head self-attention (MHSA) [10, 15, 24]. Given an embedded graph matrix $X^{n \times h}$ of $n$ vertices represented with $h$-dimensional vectors, a transformer computes a set of queries $Q = XW^Q$, keys $K = XW^K$, and values $V = XW^V$, where $W^Q$, $W^K$, $W^V$ are three different parameterized matrices with dimensions $h \times k$. The self-attention is then computed as:

$$\text{Self-Attention}(X) = \text{Attention}(Q, K, V) = \text{softmax}\left(\frac{QK^\top}{\sqrt{n}}\right) V \qquad (3)$$

A transformer uses $m$ attention modules in parallel, and then concatenates all the outputs and projects them back to $h$-dimensional vectors using a final $W^O$ feed-forward layer:

$$\text{MultiHeadSelfAttn}(X) = \text{Concat}(\text{head}_1, \cdots, \text{head}_m)\, W^O, \quad \text{where } \text{head}_i = \text{Attention}\left(XW_i^Q, XW_i^K, XW_i^V\right). \qquad (4)$$
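A minimal PyTorch sketch of equations (3) and (4) follows. It is illustrative rather than an exact implementation, and it uses the conventional per-head scaling by the square root of the key dimension, whereas equation (3) is written with a square root of n.

import torch
import torch.nn as nn

class MultiHeadSelfAttention(nn.Module):
    """Minimal multi-head self-attention over a graph of vertices.

    x has shape (batch, n_vertices, h); every vertex attends to every other,
    so the learned attention weights play the role of soft graph edges.
    """

    def __init__(self, h=32, n_heads=4):
        super().__init__()
        assert h % n_heads == 0
        self.h, self.n_heads, self.d = h, n_heads, h // n_heads
        self.w_q = nn.Linear(h, h, bias=False)  # W^Q for all heads at once
        self.w_k = nn.Linear(h, h, bias=False)  # W^K
        self.w_v = nn.Linear(h, h, bias=False)  # W^V
        self.w_o = nn.Linear(h, h, bias=False)  # final W^O projection

    def forward(self, x):
        b, n, _ = x.shape
        # split the h-dimensional embeddings into n_heads chunks of size d
        q = self.w_q(x).view(b, n, self.n_heads, self.d).transpose(1, 2)
        k = self.w_k(x).view(b, n, self.n_heads, self.d).transpose(1, 2)
        v = self.w_v(x).view(b, n, self.n_heads, self.d).transpose(1, 2)
        attn = torch.softmax(q @ k.transpose(-2, -1) / self.d ** 0.5, dim=-1)
        out = (attn @ v).transpose(1, 2).reshape(b, n, self.h)
        return self.w_o(out)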
4 METHOD

4.1 Graph Observations and State

Our method rethinks how cooperative problems are presented to neural networks. For the sake of simplicity, here we assume that an agent observes $k$ entities at each time step $t$, where $k$ is the total number of entities in the environment. In our approach, a set of $z$ features defines each entity. Because of the environment's partial observability, the features can take different values for each agent.

[Figure 2: (a) Transformer Mixer. (b) Overall TransfQMix architecture. (c) Transformer Agent. The diagram shows the agents (Agent 1 through Agent N), the mixing network, and the embedder and transformer blocks of each component. The purple dotted lines represent the recurrent connections. The green components are simple feed-forward layers (embedders and scalar projectors), and the green circles are the embedded vertices. The purple circles are transformed vertices. The dotted green components represent the action decoupling mechanism.]

Therefore,

$$ent_{i,t}^a = [f_1, \cdots, f_z]_{i,t}^a \qquad (5)$$

defines the entity $i$ as it is observed by the agent $a$ at the time step $t$. We replace the traditional observation vectors with observation matrices of dimensions $k \times z$ which include all the $k$ entities observed by an agent $a$ at $t$:

$$O_t^a = \begin{bmatrix} ent_1 \\ \vdots \\ ent_k \end{bmatrix}_t^a = \begin{bmatrix} f_{1,1} & \cdots & f_{1,z} \\ \vdots & \ddots & \vdots \\ f_{k,1} & \cdots & f_{k,z} \end{bmatrix}_t^a \qquad (6)$$

This structure allows the agents to process features of the same type using the same weights of a parameterized matrix $Emb$ with shape $z \times h$, where $h$ is an embedding dimension. The resulting matrix $E_t^a = O_t^a\, Emb^a$ is formed by the vertex embeddings $[e_1, \cdots, e_k]_t^{a\top}$ that will be further processed by transformers. Notice that $Emb^a$ is independent of $k$. Conversely, the encoding feed-forward layer used by RNN agents has approximately $k \times z \times h$ parameters. Our approach is therefore more scalable and transferable with respect to the number of entities.
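The following sketch illustrates this shape argument with hypothetical dimensions: the shared z x h embedder is unaffected by the number of entities k, unlike the flat encoder of a traditional RNN agent.

import torch
import torch.nn as nn

k, z, h = 5, 4, 32                 # entities, vertex features, embedding dim
obs_matrix = torch.randn(k, z)     # O^a_t as in equation (6)

embedder = nn.Linear(z, h, bias=False)   # plays the role of Emb (z x h)
vertices = embedder(obs_matrix)          # E^a_t = O^a_t Emb, shape (k, h)

# a traditional RNN agent would instead flatten the k x z features and need
# roughly k * z * h encoder parameters, which change whenever k changes
flat_encoder = nn.Linear(k * z, h)
_ = flat_encoder(obs_matrix.reshape(1, k * z))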
The observation vectors in the cooperative environments we studied [11, 20, 25] already contained an implicit matrix structure or required very few modifications to adopt it. Features like (relative) map location, velocity, remaining life points, and so on, which are frequently defined for all entities and then concatenated in the same vector, can easily be rethought as vertex features of our observation matrix. On the other hand, features such as a one-hot encoding of the agent's last action or a one-hot encoding of the agent's id necessitate extra work. Moreover, since in our method the features of the same types are processed by the same weights, we lose the positional information implicitly present in the concatenated vectors. A traditional encoder, indeed, can learn that the features in some specific vector locations are relevant to some specific entity and hence treat them differently from the others.

In our preliminary research, we found that we can compensate for these drawbacks by using two additional binary features. The first, IS_SELF, indicates whether the described entity is the agent to which the observation matrix belongs:

$$f_{i,\text{IS\_SELF}}^a = \begin{cases} 1, & \text{if } i = a \\ 0, & \text{otherwise.} \end{cases} \qquad (7)$$

This feature will be 1 for $ent_{a,t}^a$ and 0 for all the other entities. IS_SELF can be thought of as a re-adaptation of the one-hot encoding of the agent's id, which is commonly employed by state-of-the-art models [18, 26, 30]. The second feature tells us whether the described entity is a cooperative agent or not:

$$f_{i,\text{IS\_AGENT}}^a = \begin{cases} 1, & \text{if } i \in A \\ 0, & \text{otherwise} \end{cases} \qquad (8)$$

allowing the vertex features of teammates to be treated differently from the others. Even though state-of-the-art methods do not always include this feature, we argue that we are not using additional data, because this information is otherwise implicitly encoded in the vector positions.
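A minimal sketch of equations (7) and (8), assuming for illustration that the first n_agents rows of the observation matrix correspond to the cooperative agents:

import torch

def entity_rows(features, self_idx, n_agents):
    """Append the IS_SELF and IS_AGENT binary columns to each entity row.

    features: (k, z') per-entity features of the k observed entities; the
    first n_agents rows are assumed (for illustration) to be the agents.
    """
    k = features.shape[0]
    is_self = torch.zeros(k, 1)
    is_self[self_idx] = 1.0              # 1 only for the observing agent itself
    is_agent = torch.zeros(k, 1)
    is_agent[:n_agents] = 1.0            # 1 for every cooperative agent
    return torch.cat([features, is_self, is_agent], dim=1)  # (k, z' + 2)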
We apply the same reformulation of the agents' observations to the global state. Usually, the state is defined as a vector of "real" features relative to the entities (i.e., not partially observed by an agent) and/or the concatenation of all agents' observations. In our approach, we define a state matrix $S_t$ of dimensions $k \times z$:

$$S_t = \begin{bmatrix} ent_1 \\ \vdots \\ ent_k \end{bmatrix}_t = \begin{bmatrix} f_{1,1} & \cdots & f_{1,z} \\ \vdots & \ddots & \vdots \\ f_{k,1} & \cdots & f_{k,z} \end{bmatrix}_t \qquad (9)$$

which defines the vertex features for all the entities from a global point of view. For simplicity, in the notation we assume that we are using the same $z$ features in both $S$ and $O$. We could use different ones, though. For instance, adding IS_SELF to $S$ does not make sense, since the features are not defined with respect to any agent, and indeed in our experiments we exclude IS_SELF from $S$. In the environments that we took into account, the state vectors showed a structure easily reshaped as in equation (9). As for $O$, we can process the same feature types in parallel with a parameterized matrix $Emb_s$ to obtain embedded vertices that can be further processed by a transformer, i.e., $E_t = [e_1, \cdots, e_k]_t^\top = S_t\, Emb_s$.
4.2 Transformer Agent

Our transformer agent takes as input the embedded vertices $E_t^a = [e_1, \cdots, e_k]_t^{a\top}$ plus a hidden vector $h_{t-1}^a$, which has the same size as any vector $e_i^a$ and is filled with zeros at the beginning of an episode. The final input matrix is $X_t^a = [h_{t-1}^a, e_{1,t}^a, \cdots, e_{k,t}^a]^\top$. The output of $l$ transformer blocks, $\tilde{X}_t^a = \text{MultiHeadSelfAttn}(X_t^a)$, is a refined graph in which all the vertices have been altered based on the attention given to the others. In particular, $h_t^a = \tilde{h}_{t-1}^a$ can be considered a transformation of the agent's hidden state according to the attention given to the new state of the entities. Similarly to the approach used in natural language processing, where the transformation of the first token ([CLS] in BERT [3]) is considered to encode an entire sentence, we consider $h_t^a$ to encode the general coordination reasoning of an agent. We therefore sample the agent's action-values from $h_t^a$ using a feed-forward layer $W_u$ of dimensions $h \times u$, where $u$ is the number of actions: $Q_a(\tau_a, \cdot) = h_t^a W_u$. Finally, we pass $h_t^a$ to the next time step so that the agent can update its coordination reasoning recurrently. When some of the agent's actions are directly related to some of the observed entities (e.g., "attack the enemy $i$" in StarCraft II), our transformer agents use a decoupling mechanism similar to the one introduced in [5]. In particular, the action-values of the entity-related actions are derived from their respective entity embeddings. An additional feed-forward matrix $W_{\hat{u}}$ of dimension $h \times 1$ is used in this case. For example, the q-value of attacking the enemy $i$ is sampled as $\tilde{e}_{i,t}^a W_{\hat{u}}$. The q-values of the non-entity-related and the entity-related actions are then concatenated together to obtain $Q_a(\tau_a, \cdot)$.
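The following is an illustrative sketch of this forward pass. It is not the official implementation: it approximates the transformer blocks with PyTorch's standard encoder layers, and all names and dimensions are assumptions for the example.

import torch
import torch.nn as nn

class TransformerAgentSketch(nn.Module):
    """The hidden state h_{t-1} is prepended as an extra vertex; its
    transformed version h_t both conditions the action-values and is
    passed to the next time step."""

    def __init__(self, z, h=32, n_actions=5, n_heads=4, n_blocks=2):
        super().__init__()
        self.embedder = nn.Linear(z, h, bias=False)
        block = nn.TransformerEncoderLayer(d_model=h, nhead=n_heads, batch_first=True)
        self.blocks = nn.TransformerEncoder(block, num_layers=n_blocks)
        self.w_u = nn.Linear(h, n_actions)   # Q^a(tau_a, .) sampled from h_t
        self.w_u_hat = nn.Linear(h, 1)       # decoupled entity-related q-values

    def forward(self, obs_matrix, h_prev):
        # obs_matrix: (batch, k, z); h_prev: (batch, 1, h), zeros at episode start
        x = torch.cat([h_prev, self.embedder(obs_matrix)], dim=1)  # (batch, k+1, h)
        x = self.blocks(x)
        h_t = x[:, 0]                        # transformed hidden-state vertex
        q_basic = self.w_u(h_t)              # non-entity-related actions
        q_entities = self.w_u_hat(x[:, 1:]).squeeze(-1)  # e.g. "attack entity i"
        return torch.cat([q_basic, q_entities], dim=-1), h_t.unsqueeze(1)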
4.3 Transformer Mixer

Exactly as QMix, TransfQMix uses an MLP in order to project $Q_A$ (the q-values of the actions sampled by the individual agents) over $Q_{tot}$ (the q-value of the joint sampled action). Formally:

$$Q_{tot} = \left( Q_A^{(1 \times n)} W_1^{(n \times h)} + b_1^{(1 \times h)} \right) W_2^{(h \times 1)} + b_2^{(1 \times 1)} \qquad (10)$$

where $W_1$, $b_1$ and $W_2$, $b_2$ are the weights and biases of the hidden and output layer, respectively. We explicitly state inside brackets the dimensions of equation (10) to show that only three values are relevant: $n$, the number of agents; $h$, a hidden dimension; and 1, which accounts for $Q_{tot}$ being a scalar. This shows that in order to arrange the MLP mixer we need $n + 2$ vectors of size $h$ plus a scalar.

QMix generates these vectors using 4 MLP hypernetworks. We propose to use the outputs of a transformer to generate the weights of the mixer's MLP. The input graph of our transformer mixer is:

$$X_t = \left[ h_t^1, \cdots, h_t^n, w_{t-1}^{b_1}, w_{t-1}^{W_2}, w_{t-1}^{b_2}, e_{1,t}, \cdots, e_{k,t} \right]^\top \qquad (11)$$

where $h_t^1, \cdots, h_t^n$ are the $n$ hidden states of the agents, $w_{t-1}^{b_1}$, $w_{t-1}^{W_2}$, $w_{t-1}^{b_2}$ are three recurrent vectors filled with zeros at the beginning of an episode, and $e_{1,t}, \cdots, e_{k,t}$ is the embedded state, i.e., $E_t = S_t\, Emb_s$. The output consists of a matrix $\tilde{X}_t = \text{MultiHeadSelfAttn}(X_t)$ that contains the same vertices of $X_t$ transformed by the multi-head self-attention mechanism. In particular, $\tilde{h}_t^1, \cdots, \tilde{h}_t^n$ are the coordination reasonings of the agents enhanced by global information to which the agents had no access, namely the hidden states of the other agents and the true state of the environment. These $n$ refined vectors are used to build $W_1$. $Q_A W_1$ is therefore a re-projection of the individual q-values $Q_A$ over a transformation of the agents' hidden states. Notice that the individual q-values were generated (or conditioned) exactly from $h_t^1, \cdots, h_t^n$ by the agents. This means that the primary goal of the transformer mixer is to combine and refine the independent agents' reasoning so that it represents the team coordination.

The transformed embeddings of the recurrent vectors, $w_t^{b_1} = \tilde{w}_{t-1}^{b_1}$, $w_t^{W_2} = \tilde{w}_{t-1}^{W_2}$, $w_t^{b_2} = \tilde{w}_{t-1}^{b_2}$, are used to generate $b_1$, $W_2$, $b_2$, respectively. Since $b_2$ is a scalar, an additional parameterized matrix with dimensions $h \times 1$ is applied to $w_t^{b_2}$. We use a recurrent mechanism for two reasons: (i) to ensure that the transformer mixer is totally independent of the number of entities in the environment; and (ii) to incorporate a temporal dependence into the centralized training, in accordance with the MDP formulation of the problem. We argue that $Q_{tot}$ is heavily dependent on prior states and that this reliance should be encoded explicitly in the mixer network. This recurrent process allows the mixer to provide more consistent targets across time steps, resulting in more stable training.

We employ the same strategy described by QMix to adhere to the monotonicity constraint. Namely, we apply an absolute activation function to the weights $W_1$ and $W_2$ and a relu to $b_2$.
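A minimal sketch of how the transformed vertices could be assembled into the monotonic mixing MLP of equation (10); all tensor names are illustrative, and equation (10) is followed literally, without a hidden nonlinearity (QMix itself applies an ELU at this point).

import torch

def mix(agent_qs, h_tilde, w_b1, w_W2, w_b2, scalar_proj):
    """Assemble the mixing MLP from transformer outputs.

    agent_qs: (batch, 1, n) individual q-values Q_A
    h_tilde:  (batch, n, h) transformed agent hidden states -> rows of W1
    w_b1, w_W2, w_b2: (batch, h) transformed recurrent vertices -> b1, W2, b2
    scalar_proj: an (h -> 1) linear layer turning w_b2 into the scalar b2
    """
    W1 = torch.abs(h_tilde)                  # (batch, n, h); abs => monotonicity
    b1 = w_b1.unsqueeze(1)                   # (batch, 1, h)
    W2 = torch.abs(w_W2).unsqueeze(-1)       # (batch, h, 1)
    b2 = torch.relu(scalar_proj(w_b2))       # (batch, 1)
    hidden = torch.bmm(agent_qs, W1) + b1    # (batch, 1, h)
    return torch.bmm(hidden, W2).squeeze(-1) + b2   # Q_tot, shape (batch, 1)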
5 EXPERIMENTAL SETUP

5.1 Spread

In the Spread environment [11], the goal of $n$ agents is to move as close as possible to the random positions occupied by $n$ landmarks while avoiding collisions with each other. The agents can move in four directions or stay still. The optimal policy would have each agent occupying one landmark, resulting in a perfect spatial distribution. Since each agent must anticipate which target the other agents will occupy and proceed to the remaining one, this calls for robust coordination reasoning.

The global reward is the negative of the minimum distance from each landmark to any agent. An additional term is added to punish collisions among agents. Note that the original reward function implemented by [11] was affected by a redundant factor, i.e., it was multiplied by $2n$. Later on, PettingZoo [22] eliminated this redundancy, and this is the reward function we used here.

The Spread observation space for the agent $a$ consists of a vector containing its own velocity and absolute position together with the relative positions of all the other agents and landmarks. In order to convert it into an observation matrix, we only maintain the relative positions, which are the features defined for all the entities observed by $a$. Every observed entity is therefore defined by $ent_{i,t}^a = [pos_x, pos_y, \text{IS\_SELF}, \text{IS\_AGENT}]_{i,t}^a$, where $pos_x$ and $pos_y$ are the relative positions of the entity $i$ with respect to $a$ on the horizontal and vertical axes.

[Figure 3: Comparative results in the Spread environment. Four panels plot the test occupied landmarks % against training time steps (0 to 2M): (a) 3 Agents, 3 Landmarks; (b) 4 Agents, 4 Landmarks; (c) 5 Agents, 5 Landmarks; (d) 6 Agents, 6 Landmarks. The compared methods are TransfQmix, Qtran, Qplex, Qmix, OwQmix, and CwQmix.]

The Spread state space consists of the concatenation of all the agents' observations. Also in this case we keep the features that are defined for all the entities, which are the absolute positions and the velocities. In the final state matrix the entities are defined by $ent_{i,t} = [\hat{pos}_x, \hat{pos}_y, v_x, v_y, \text{IS\_AGENT}]_{i,t}$, where $\hat{pos}_x$ and $\hat{pos}_y$ are the absolute position of the entity $i$, and $v_x$ and $v_y$ its velocity (which is 0 in the case of the landmarks).

The standard reported metric for Spread is the global reward. This metric, however, is not very informative, because its value is challenging to interpret and does not stay in the same range when $n$ changes. As a result, we present a new metric: the percentage of landmarks occupied at the conclusion of an episode (POL). To compute the POL, we count the number of landmarks with an agent closer than a predetermined threshold and divide it by the total number of landmarks. The POL is a more informative metric because it assesses the proper distribution of the agents. Additionally, it maintains the same range (0, 1) when $n$ is changed. We found that when the distance threshold is set to 0.3, the POL has a correlation of 0.95 with the reward function, meaning that the data we are presenting is still comparable with previous studies.
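A minimal sketch of the POL computation under these definitions:

import torch

def percentage_of_occupied_landmarks(agent_pos, landmark_pos, threshold=0.3):
    """Fraction of landmarks with an agent closer than `threshold`
    at the end of an episode.

    agent_pos, landmark_pos: (n, 2) tensors of 2D positions.
    """
    # pairwise distances: entry (i, j) is the distance landmark_i -> agent_j
    dists = torch.cdist(landmark_pos, agent_pos)
    occupied = (dists.min(dim=1).values < threshold).float()
    return occupied.mean().item()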
5.2 StarCraft II

This environment uses the StarCraft II Learning Environment [25], which makes available a range of micromanagement tasks based on the well-known real-time strategy game StarCraft II¹. Each task consists of a unique combat scenario in which a group of agents, each managing a single unit, engage an army under the command of the StarCraft game's central AI. In order to win a game, agents must develop coordinated action sequences that allow them to concentrate their attention on certain enemy units. We report the results in SC2 for the 8 tasks that are considered the most difficult in the literature [26, 30]: 5m_vs_6m, 8m_vs_9m, 27m_vs_30m, 5s10z, 3s5z_vs_3s6z, 6h_vs_8z, MMM2, and corridor.

¹ StarCraft II is a trademark of Blizzard Entertainment™.

The SC2 observation vector for the agent $a$ consists of a concatenation of features defined for the allies and the enemies that are inside the sight range of the agent. These features include the relative position of the entity with respect to $a$, the distance, the health, the state of the shield, and a one-hot encoding of the type of the entity (which can be a marine, a marauder, a stalker, etc.). This structure already defines an observation matrix, which requires only the addition of the IS_SELF and IS_AGENT features to be used by TransfQMix. However, TransfQMix cannot use some additional features that are present in the original SC2 observation vector, which include a one-hot encoding of the available and previous actions and a representation of the map's limits.

Our transformer mixer can be fed directly with the original state vector of SC2, which is also a concatenation of features defined for all $k$ entities. These features are the same as those of the observation vector but defined from a global viewpoint, i.e., the position relative to the center of the map. On the other hand, an additional feature consisting of the actions taken by all the agents is not used by TransfQMix, since it is not compatible with the graph state approach.

The decoupling technique described in Section 4.2 is employed for TransfQMix and UPDET, i.e., the q-value of attacking the enemy $i$ is determined from the transformer embedding of $i$. When appropriate, the same process is used for actions that involve healing another agent.

5.3 Algorithms

Our codebase is built on top of pymarl [4, 18] and is available at: https://github.com/mttga/pymarl_transformers. It contains TransfQMix and our wrappers for Spread and SC2, plus the original implementations of the algorithms to which our method is compared: QMix, QTran, QPlex, OW-QMix, CW-QMix and UPDET. For all the compared methods, we used the same hyper-parameters reported in the original implementations. We kept the parameters of each method constant in all the experiments we performed. Notice that in all our experiments we used the parameter-sharing technique, i.e., all the agents shared the same weights. This was demonstrated to be very beneficial in several studies [14, 18, 30].

We fine-tuned TransfQMix on the SC2 5m_vs_6m task and used the same parameters for all the other settings (including the Spread tasks). In particular, we used 32 as the hidden embedding dimension, 4 attention heads, and 2 transformer blocks for both the transformer agents and the mixer, resulting in a total of ∼50k parameters for both networks. The learning configuration for all the transformer architectures (including UPDET) used the Adam optimizer with a learning rate of 0.001 and a λ of 0.6 for computing the TD(λ) targets. This setup is different from the one used by the state-of-the-art RNN-based models (RMSProp optimizer, 0.0005 learning rate, and 0 for the TD λ). However, we found that the optimal learning configuration of TransfQMix did not work with the other models, i.e., they performed better with their original learning setup. Some parameters were shared by all the methods, such as the buffer size (5000 episodes), the batch size (32 episodes), the interval for updating the target network (200 episodes), and the anneal time for the epsilon decay (100k time steps).
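For reference, the shared learning configuration described in Section 5.3 can be summarized as follows. This is a hypothetical dictionary layout for illustration; the exact configuration keys live in the pymarl-based codebase.

transformer_config = {
    "hidden_dim": 32,          # embedding dimension h
    "n_heads": 4,              # attention heads
    "n_blocks": 2,             # transformer blocks in agents and mixer
    "optimizer": "adam",
    "lr": 0.001,
    "td_lambda": 0.6,          # lambda for the TD(lambda) targets
    # shared by all compared methods:
    "buffer_size": 5000,             # episodes
    "batch_size": 32,                # episodes
    "target_update_interval": 200,   # episodes
    "epsilon_anneal_time": 100_000,  # time steps
}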
[Figure 4: Comparative results in the SC2 environment. Eight panels plot the test win rate % against training time steps (0 to 2M): (a) 5m_vs_6m; (b) 8m_vs_9m; (c) 27m_vs_30m; (d) 6h_vs_8z; (e) 5s10z; (f) 3s5z_vs_3s6z; (g) MMM2; (h) corridor. The compared methods are TransfQmix, Qtran, Qplex, Qmix, OwQmix, CwQmix, and Updet.]

6 RESULTS AND DISCUSSION

6.1 Main Results

The performances of MARL methods in Spread are usually reported using $n = 3$. We increased $n$ up to 6 in order to analyze the scalability of the methods. Figure 3 shows how the POL improved as the considered methods were trained on the various scenarios. The POL was computed every 40k time steps by running 30 independent episodes with each agent performing greedy decentralised action selection. In the standard task involving 3 agents, state-of-the-art methods learned a good policy, covering on average ∼80% of the landmarks, with the exception of QTran and CW-QMix (POL of ∼50%). However, they did not perform significantly better than QMix. The sole state-of-the-art method that could defeat QMix in the tasks involving 4 or 5 agents was QPlex (POL of 50%), which proved to be very unstable with $n = 6$. Conversely, TransfQMix significantly outperformed QMix and the other methods in every scenario, reaching a steady POL of almost 90% in just 500k time steps. Notice that in Spread the optimal policy is the same for every $n$ (i.e., each agent occupying a landmark). State-of-the-art methods could learn this strategy only when the team size was small. On the other hand, TransfQMix demonstrated a better agent-team-size invariance by obtaining similar results in every scenario.

Figure 4 shows the results of all the methods in the hardest tasks of SC2. The reported metric is the average percentage of won games, performing greedy action sampling every 100 episodes during training. The results for UPDET are reported only for tasks that include marines, since the original implementation of this method does not support other scenarios. It is noteworthy that UPDET did not perform better than RNN-based models and failed in the 27m_vs_30m task, indicating that using a transformer agent with policy decoupling does not necessarily provide a clear advantage. Conversely, our more sophisticated use of transformers significantly outperformed the other models in every task and consistently defeated the SC2 central AI even in scenarios where previous methods could not win any game. TransfQMix also demonstrated its effectiveness in environments with a large number of entities, such as the corridor map (which stands for 6 Zealots versus 24 Zerglings, for a total of 30 entities) and 27m_vs_30m (57 entities). While other approaches require their parameters to be increased according to the number of entities, TransfQMix's networks are (nearly) the same size in all the tasks. This suggests that TransfQMix's architecture may be regarded as sufficiently generic to address various problems without requiring structural changes.

Table 1: Results of zero-shot transfer in Spread (POL per scenario).

Model               3v3    4v4    5v5    6v6
TransfQMix (3v3)    0.98   0.88   0.80   0.75
TransfQMix (4v4)    0.96   0.93   0.90   0.86
TransfQMix (5v5)    0.88   0.85   0.82   0.82
TransfQMix (6v6)    0.91   0.88   0.85   0.84
TransfQMix (CL)     0.88   0.88   0.87   0.87
State-of-the-art    0.76   0.45   0.36   0.33
970 |
+
6.2
|
971 |
+
Transfer Learning
|
972 |
+
We tested the zero-shot capabilities of TransfQMix by applying
|
973 |
+
the networks trained in a particular Spread task to the others. Ta-
|
974 |
+
ble 1 shows the POL averaged across 1000 episodes achieved by
|
975 |
+
TransfQMix trained with 𝑛 agents in scenarios with different 𝑛.
|
976 |
+
As a benchmark, the best POLs obtained by state-of-the-art mod-
|
977 |
+
els trained in each specific task are reported. We also include the
|
978 |
+
performances of TransfQMix trained with a curriculum learning
|
979 |
+
(CL) approach, which consists of making the agents cooperate in
|
980 |
+
progressively larger teams. In particular, we trained the agents in
|
981 |
+
teams of 3, 4, 5 and 6 for 500𝑘 time steps each.
|
982 |
+
In general, every network showed excellent zero-shot capabili-
|
983 |
+
ties but worse performances for larger teams, except for the agents
|
984 |
+
trained with CL, which performed similarly in all the scenarios.
|
985 |
+
[Figure 5 plots: x-axis time steps (0–2M); y-axis test win rate %; curves TransferLearning, Scratch. (a) 8m_vs_9m to 5m_vs_6m. (b) 5s10z to 3s5z_vs_3s6z.]

Figure 5: Transfer learning vs. training from scratch in two SC2 tasks.
In this sense, CL seems a promising approach for obtaining general coordination policies with TransfQMix. Surprisingly, the best transferable policy was learned in the 4v4 task. This could be because the scenario is complex enough to necessitate learning strong coordination policies, but not so complicated as to produce instabilities or slow down the learning process. Finally, it is remarkable that all of TransfQMix's zero-shot transfers outperformed state-of-the-art methods trained in the various scenarios.

The only constraint on using TransfQMix in different contexts is that the vertex feature space must be the same. This is not always guaranteed in the SC2 environment, because the unit type's one-hot encoding feature depends on the total number of unit types in the scenario. Nonetheless, we can utilize the same networks in maps with the same entity types. Figure 5 shows the results obtained in the 5m_vs_6m and 3s5z_vs_3s6z tasks by fine-tuning the agents trained in the 8m_vs_9m and 5s10z scenarios, respectively.
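One way to satisfy this shared-vertex-feature-space constraint is to zero-pad the unit-type one-hot block to a fixed width shared by all maps of interest. The snippet below is a sketch of that idea, not the paper's implementation; the width bound is an assumption.

```python
import numpy as np

# Sketch: align per-entity (vertex) features across SC2 maps by padding the
# unit-type one-hot block to a fixed width. MAX_UNIT_TYPES is an assumed
# upper bound shared by the source and target maps.
MAX_UNIT_TYPES = 8

def pad_unit_type(onehot: np.ndarray) -> np.ndarray:
    """Zero-pad a map-specific unit-type one-hot to MAX_UNIT_TYPES slots."""
    padded = np.zeros(MAX_UNIT_TYPES, dtype=onehot.dtype)
    padded[: onehot.shape[0]] = onehot
    return padded

# A marine-only map (1 unit type) and a stalker/zealot map (2 unit types)
# now produce vertices living in the same feature space:
marine = pad_unit_type(np.array([1.0]))
zealot = pad_unit_type(np.array([0.0, 1.0]))
assert marine.shape == zealot.shape == (MAX_UNIT_TYPES,)
```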
In both cases, we are transferring coordination strategies learnt in simpler settings to more difficult tasks, implying that we are conducting a minimal form of CL. We can see how fine-tuning helped TransfQMix develop a significantly better policy (Figure 5b) or converge faster (Figure 5a) than when it was trained from scratch. The initial peak in the figures corresponds to the zero-shot performance, and it is followed by a falling phase in which the weights were rapidly adjusted for the new task.

In conclusion, the results demonstrate TransfQMix's promising capacity to transfer knowledge between scenarios, as well as how transfer and curriculum learning could aid in the resolution of complex MARL tasks.
6.3 Ablation

[Figure 6 plots: x-axis time steps (0–2M); curves TransfQMix, QMixTransfMixer, QMixTransfAgent, QMixGraphState, QMix. (a) SC2: 5m_vs_6m (y-axis test win rate %). (b) Spread (y-axis test occupied landmarks %).]

Figure 6: Ablation study.
It might be claimed that the results obtained by TransfQMix in Spread are not comparable with those of previous methods because we employ a state observation matrix that differs from the original state vector. To test this argument, we ran QMix with the flattened version of our state matrix in Spread. The test POL averaged across all Spread tasks is shown in Figure 6b. We can see that QMix's performance with a graph state (QMixGraphState) was not considerably different from its performance with the original state vector. The same figure shows that replacing the QMix mixer's hypernetworks with a transformer mixer improved performance (QMixTransfMixer). This indicates that, in order to benefit from a graph-based state, a graph-based network such as a transformer should be used. We also provide the results produced by our transformer agents in conjunction with the traditional QMix hypernetworks (QMixTransfAgent). This framework clearly outperformed the original one based on RNNs in terms of coordination, but it was not as stable or performant as TransfQMix.
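For clarity, the QMixGraphState baseline amounts to nothing more than flattening the entity-feature matrix before feeding it to the standard QMix hypernetworks. A minimal sketch, with illustrative shapes:

```python
import numpy as np

# Sketch of the QMixGraphState baseline's input: the (entities x features)
# state matrix is flattened into a single vector so that vanilla QMix's
# hypernetworks can consume it. Shapes are illustrative.
n_entities, n_feats = 6, 10
state_matrix = np.random.rand(n_entities, n_feats)  # graph-structured state

flat_state = state_matrix.reshape(-1)               # (60,) vector for QMix
assert flat_state.shape == (n_entities * n_feats,)
```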
The same ablation study was carried out in the SC2 5m_vs_6m task (Figure 6a). In this scenario, the transformer agent and mixer alone were unable to improve the performance of QMix, implying that transformers must be used in both the agent and mixer networks in order to leverage the graph structure of the observations and state.
7 CONCLUSION

In this paper we proposed a novel graph-based formalization of MARL problems that depicts coordination problems in a more natural way. We introduced TransfQMix, a transformer-based method that makes use of this structure to enhance the coordination reasoning of QMix's agents and mixer. TransfQMix demonstrated great learning capabilities by excelling in the most challenging SC2 and Spread tasks without the need for task-specific hyperparameter tuning. In contrast to prior approaches that attempted to enhance QMix, TransfQMix does not focus on the monotonicity constraint or other aspects of the learning process. This shows that, in order to improve MARL methods, neural network architectures and environment representations need to receive greater focus.

The application of TransfQMix to transfer learning, zero-shot transfer, and curriculum learning yielded promising results. In future research we aim to explore the method's generalization abilities by including several tasks in a single learning pipeline. For instance, we aim to train the same agents to solve all the SC2 tasks. Additionally, we want to investigate the feasibility of transferring coordination policies between MARL domains. Finally, we want to examine in greater detail the influence of multi-head self-attention on coordination reasoning.
ACKNOWLEDGMENTS

This project has received funding from the EU's Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie grant agreement No 893089. This work acknowledges the 'Severo Ochoa Centre of Excellence' accreditation (CEX2019-000928-S). We gratefully acknowledge the David and Lucile Packard Foundation.
REFERENCES

[1] Akshat Agarwal, Sumit Kumar, Katia Sycara, and Michael Lewis. 2020. Learning Transferable Cooperative Behavior in Multi-Agent Teams. In Proceedings of the 19th International Conference on Autonomous Agents and MultiAgent Systems (Auckland, New Zealand) (AAMAS '20). 1741–1743.
[2] Dzmitry Bahdanau, Kyung Hyun Cho, and Yoshua Bengio. 2015. Neural machine translation by jointly learning to align and translate. In 3rd International Conference on Learning Representations, ICLR 2015.
[3] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers). Association for Computational Linguistics, Minneapolis, Minnesota, 4171–4186. https://doi.org/10.18653/v1/N19-1423
[4] Jakob Foerster, Gregory Farquhar, Triantafyllos Afouras, Nantas Nardelli, and Shimon Whiteson. 2018. Counterfactual multi-agent policy gradients. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 32.
[5] Siyi Hu, Fengda Zhu, Xiaojun Chang, and Xiaodan Liang. 2020. UPDeT: Universal Multi-agent RL via Policy Decoupling with Transformers. In International Conference on Learning Representations.
[6] Maximilian Hüttenrauch, Adrian Sosic, Gerhard Neumann, et al. 2019. Deep reinforcement learning for swarm systems. Journal of Machine Learning Research 20, 54 (2019), 1–31.
[7] Landon Kraemer and Bikramjit Banerjee. 2016. Multi-agent reinforcement learning as a rehearsal for decentralized planning. Neurocomputing 190 (2016), 82–94.
[8] Sergey Levine, Chelsea Finn, Trevor Darrell, and Pieter Abbeel. 2016. End-to-end training of deep visuomotor policies. The Journal of Machine Learning Research 17, 1 (2016), 1334–1373.
[9] Sheng Li, Jayesh K Gupta, Peter Morales, Ross Allen, and Mykel J Kochenderfer. 2021. Deep Implicit Coordination Graphs for Multi-agent Reinforcement Learning. In Proceedings of the 20th International Conference on Autonomous Agents and MultiAgent Systems. 764–772.
[10] Zhouhan Lin, Minwei Feng, Cicero Nogueira dos Santos, Mo Yu, Bing Xiang, Bowen Zhou, and Yoshua Bengio. 2017. A Structured Self-Attentive Sentence Embedding. In International Conference on Learning Representations.
[11] Ryan Lowe, Yi I Wu, Aviv Tamar, Jean Harb, OpenAI Pieter Abbeel, and Igor Mordatch. 2017. Multi-agent actor-critic for mixed cooperative-competitive environments. Advances in Neural Information Processing Systems 30 (2017).
[12] Volodymyr Mnih, Koray Kavukcuoglu, David Silver, Andrei A Rusu, Joel Veness, Marc G Bellemare, Alex Graves, Martin Riedmiller, Andreas K Fidjeland, Georg Ostrovski, et al. 2015. Human-level control through deep reinforcement learning. Nature 518, 7540 (2015), 529–533.
[13] Frans A Oliehoek and Christopher Amato. 2016. A Concise Introduction to Decentralized POMDPs. Springer.
[14] Georgios Papoudakis, Filippos Christianos, Lukas Schäfer, and Stefano V Albrecht. 2021. Benchmarking Multi-Agent Deep Reinforcement Learning Algorithms in Cooperative Tasks. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1).
[15] Ankur Parikh, Oscar Täckström, Dipanjan Das, and Jakob Uszkoreit. 2016. A Decomposable Attention Model for Natural Language Inference. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics, Austin, Texas, 2249–2255.
[16] Emilio Parisotto, Francis Song, Jack Rae, Razvan Pascanu, Caglar Gulcehre, Siddhant Jayakumar, Max Jaderberg, Raphael Lopez Kaufman, Aidan Clark, Seb Noury, et al. 2020. Stabilizing transformers for reinforcement learning. In International Conference on Machine Learning. PMLR, 7487–7498.
[17] Tabish Rashid, Gregory Farquhar, Bei Peng, and Shimon Whiteson. 2020. Weighted QMIX: Expanding monotonic value function factorisation for deep multi-agent reinforcement learning. Advances in Neural Information Processing Systems 33 (2020), 10199–10210.
[18] Tabish Rashid, Mikayel Samvelyan, Christian Schroeder, Gregory Farquhar, Jakob Foerster, and Shimon Whiteson. 2018. QMIX: Monotonic value function factorisation for deep multi-agent reinforcement learning. In International Conference on Machine Learning. PMLR, 4295–4304.
[19] Scott Reed, Konrad Zolna, Emilio Parisotto, Sergio Gomez Colmenarejo, Alexander Novikov, Gabriel Barth-Maron, Mai Gimenez, Yury Sulsky, Jackie Kay, Jost Tobias Springenberg, et al. 2022. A generalist agent. arXiv preprint arXiv:2205.06175 (2022).
[20] Kyunghwan Son, Daewoo Kim, Wan Ju Kang, David Earl Hostallero, and Yung Yi. 2019. QTRAN: Learning to factorize with transformation for cooperative multi-agent reinforcement learning. In International Conference on Machine Learning. PMLR, 5887–5896.
[21] Peter Sunehag, Guy Lever, Audrunas Gruslys, Wojciech Marian Czarnecki, Vinicius Zambaldi, Max Jaderberg, Marc Lanctot, Nicolas Sonnerat, Joel Z Leibo, Karl Tuyls, et al. 2018. Value-Decomposition Networks for Cooperative Multi-Agent Learning Based on Team Reward. In Proceedings of the 17th International Conference on Autonomous Agents and MultiAgent Systems (AAMAS 2018), Vol. 3. 2085–2087.
[22] J Terry, Benjamin Black, Nathaniel Grammel, Mario Jayakumar, Ananth Hari, Ryan Sullivan, Luis S Santos, Clemens Dieffendahl, Caroline Horsch, Rodrigo Perez-Vicente, et al. 2021. PettingZoo: Gym for multi-agent reinforcement learning. Advances in Neural Information Processing Systems 34 (2021), 15032–15043.
[23] Hado Van Hasselt, Arthur Guez, and David Silver. 2016. Deep reinforcement learning with double Q-learning. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 30.
[24] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Łukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. Advances in Neural Information Processing Systems 30 (2017).
[25] Oriol Vinyals, Timo Ewalds, Sergey Bartunov, Petko Georgiev, Alexander Sasha Vezhnevets, Michelle Yeo, Alireza Makhzani, Heinrich Küttler, John Agapiou, Julian Schrittwieser, et al. 2017. StarCraft II: A new challenge for reinforcement learning. arXiv preprint arXiv:1708.04782 (2017).
[26] Jianhao Wang, Zhizhou Ren, Terry Liu, Yang Yu, and Chongjie Zhang. 2021. QPLEX: Duplex Dueling Multi-Agent Q-Learning. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3–7, 2021.
[27] Qiang Wang, Bei Li, Tong Xiao, Jingbo Zhu, Changliang Li, Derek F Wong, and Lidia S Chao. 2019. Learning Deep Transformer Models for Machine Translation. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics. 1810–1822.
[28] Christopher JCH Watkins and Peter Dayan. 1992. Q-learning. Machine Learning 8, 3 (1992), 279–292.
[29] Bichen Wu, Chenfeng Xu, Xiaoliang Dai, Alvin Wan, Peizhao Zhang, Zhicheng Yan, Masayoshi Tomizuka, Joseph Gonzalez, Kurt Keutzer, and Peter Vajda. 2020. Visual transformers: Token-based image representation and processing for computer vision. arXiv preprint arXiv:2006.03677 (2020).
[30] Chao Yu, Akash Velu, Eugene Vinitsky, Yu Wang, Alexandre Bayen, and Yi Wu. 2021. The surprising effectiveness of PPO in cooperative, multi-agent games. arXiv preprint arXiv:2103.01955 (2021).
[31] Meng Zhou, Ziyu Liu, Pengwei Sui, Yixuan Li, and Yuk Ying Chung. 2020. Learning implicit credit assignment for cooperative multi-agent reinforcement learning. Advances in Neural Information Processing Systems 33 (2020), 11853–11864.
TransfQMix: Transformers for Leveraging the Graph Structure of Multi-Agent Reinforcement Learning Problems (Supplementary Material)

Matteo Gallici, KEMLG Research Group, Universitat Politècnica de Catalunya, Barcelona, Spain. gallici@cs.upc.edu
Mario Martin, KEMLG Research Group, Universitat Politècnica de Catalunya, Barcelona, Spain. mmartin@cs.upc.edu
Ivan Masmitja, Institut de Ciències del Mar (ICM), CSIC, Barcelona, Spain. masmitja@icm.csic.es

ACM Reference Format:
Matteo Gallici, Mario Martin, and Ivan Masmitja. 2023. TransfQMix: Transformers for Leveraging the Graph Structure of Multi-Agent Reinforcement Learning Problems (Supplementary Material). In Proc. of the 22nd International Conference on Autonomous Agents and Multiagent Systems (AAMAS 2023), London, United Kingdom, May 29 – June 2, 2023, IFAAMAS, 6 pages.
[Figure 1 plots: x-axis time steps (0–2M); y-axis test win rate %; curves TransfQMix, QTRAN, QPLEX, QMix, OW-QMix, CW-QMix, UPDeT. (a) Original optimization parameters. (b) TransfQMix optimization parameters.]

Figure 1: Results obtained in the StarCraft II 5m_vs_6m task using the optimization parameters commonly adopted by state-of-the-art models (RMSProp optimizer, 0.0005 learning rate, and 0 for td's 𝜆) and using the optimal TransfQMix optimization parameters (Adam optimizer, 0.001 learning rate, and 0.6 for td's 𝜆). State-of-the-art models do not benefit from the optimization used by TransfQMix. At the same time, TransfQMix outperforms state-of-the-art methods even when it is trained using the state-of-the-art configuration. UPDeT is the only method that benefits from TransfQMix's optimizer configuration, suggesting that these parameters are effective for training transformer networks.
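The td-𝜆 targets referenced in the caption can be computed with the standard backward recursion. The numpy sketch below is illustrative only: it ignores the episode masking and double-Q details of an actual PyMARL-style implementation.

```python
import numpy as np

def td_lambda_targets(rewards, values, gamma=0.99, lam=0.6):
    """Backward recursion for TD(lambda) targets:
    G_t = r_t + gamma * ((1 - lam) * V(s_{t+1}) + lam * G_{t+1}),
    where values[t] holds the bootstrap value V(s_{t+1})."""
    T = len(rewards)
    targets = np.zeros(T)
    g = values[-1]                 # bootstrap from the final value
    for t in reversed(range(T)):
        g = rewards[t] + gamma * ((1.0 - lam) * values[t] + lam * g)
        targets[t] = g
    return targets

# lam=0 recovers one-step TD targets; lam=1 recovers Monte Carlo returns.
print(td_lambda_targets(np.ones(5), np.zeros(5), gamma=0.9, lam=0.6))
```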
Table 1: Parameters of TransfQMix. The parameters relative to the transformer are shared between the transformer agents and the transformer mixer.

Parameter              Value   Description
Buffer Size            5000    Number of last saved episodes used for training
Batch Size             32      Batch size used for training
Update Interval        200     Episode interval for updating the target network
Optimizer              Adam    Optimizer
Learning Rate          0.001   Learning rate
Td-Lambda              0.6     Lambda for computing td-targets
Emb Dim                32      Embedding dimension ℎ
Attention Heads        4       Self-attention heads of each transformer block
Transformer Blocks     2       Number of transformer layers
Dropout                0       Dropout percentage in transformer blocks
Learnable parameters   ∼50k    Learnable parameters of a single network (mixer or agent)
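Collected as a configuration object, the table reads as follows; the key names are illustrative, not necessarily those of the released code.

```python
# TransfQMix hyperparameters from Table 1, gathered into a config dict.
# Key names are illustrative stand-ins for the actual config schema.
transf_qmix_config = {
    "buffer_size": 5000,             # last episodes kept for replay
    "batch_size": 32,
    "target_update_interval": 200,   # episodes between target-net syncs
    "optimizer": "adam",
    "lr": 1e-3,
    "td_lambda": 0.6,
    "emb_dim": 32,                   # embedding dimension h
    "n_heads": 4,                    # self-attention heads per block
    "n_blocks": 2,                   # transformer layers
    "dropout": 0.0,
}
```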
Table 2: Comparison between the number of parameters (agent and mixer networks) of TransfQMix and other state-of-the-art models. The numbers of parameters are reported for Spread 3v3 and 6v6 and SC2 27m_vs_30m, to appreciate their relation with the number of environment entities. The number of parameters of TransfQMix is invariant with respect to the entities. Conversely, the other methods increase their parameters proportionally with the entities, leading to oversized networks in the 27m_vs_30m task of SC2. TransfQMix is overall a lighter model than the other methods (with the exception of QMix in Spread 3v3 and 6v6).

(a) Spread 3v3
Model        Agent   Mixer
TransfQMix   50k     50k
QMix         27k     18k
QPlex        27k     251k
O-CWQMix     27k     179k

(b) Spread 6v6
Model        Agent   Mixer
TransfQMix   50k     50k
QMix         28k     56k
QPlex        28k     597k
O-CWQMix     28k     301k

(c) SC2 27m_vs_30m
Model        Agent   Mixer
TransfQMix   50k     50k
QMix         49k     283k
QPlex        49k     3184k
O-CWQMix     49k     1021k
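Counts like those in Table 2 can be reproduced for any PyTorch model by summing parameter element counts; the tiny model below is only a placeholder for illustration.

```python
import torch.nn as nn

def count_params(model: nn.Module) -> int:
    """Number of learnable parameters of a network."""
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

# Placeholder model, just to demonstrate the counting utility.
placeholder = nn.Sequential(nn.Linear(32, 64), nn.ReLU(), nn.Linear(64, 32))
print(f"{count_params(placeholder) / 1e3:.0f}k learnable parameters")
```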
[Figure 2 plots: x-axis time steps (0–2M); y-axis test reward mean (negative values); curves TransfQMix, QTRAN, QPLEX, QMix, OW-QMix, CW-QMix. (a) 3 Agents, 3 Landmarks. (b) 4 Agents, 4 Landmarks. (c) 5 Agents, 5 Landmarks. (d) 6 Agents, 6 Landmarks.]

Figure 2: Average reward in Spread performing greedy action selection during training. The global reward is the negative of the minimum distances from each landmark to any agent. We used the PettingZoo reward, which is proportional to 1/2𝑛 with respect to the original one. The results are proportional to the POL-based ones shown in the paper.
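As a point of reference, here is a minimal sketch of this global reward, assuming the standard MPE/Spread formulation in which the per-landmark minimum distances are summed; the extra PettingZoo scaling mentioned in the caption is omitted.

```python
import numpy as np

def spread_reward(agent_pos: np.ndarray, landmark_pos: np.ndarray) -> float:
    """Negative sum, over landmarks, of the distance to the closest agent.
    agent_pos: (n_agents, 2); landmark_pos: (n_landmarks, 2)."""
    dists = np.linalg.norm(
        landmark_pos[:, None, :] - agent_pos[None, :, :], axis=-1
    )                                  # (n_landmarks, n_agents)
    return -dists.min(axis=1).sum()    # min over agents, summed over landmarks

agents = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 0.0]])
landmarks = np.array([[0.1, 0.0], [1.0, 0.9], [2.0, 0.2]])
print(spread_reward(agents, landmarks))  # close to 0 when landmarks are covered
```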
[Figure 3 plots: x-axis time steps (0–2M); y-axis test occupied landmarks %; curves TransfQMix, TransfQMixNoGraphFeats. (a) 3 Agents, 3 Landmarks. (b) 4 Agents, 4 Landmarks. (c) 5 Agents, 5 Landmarks. (d) 6 Agents, 6 Landmarks.]

Figure 3: POL in Spread tasks performing greedy actions during training of TransfQMix using the IS_SELF and IS_AGENT vertex features (TransfQMix) and not using them (TransfQMixNoGraphFeats). Despite how simple they are, these two binary features allow the transformer to infer which of the entity embeddings corresponds to the current agent and which of the other ones correspond to team-mates. This seems extremely important in order to generate a coherent coordination graph using self-attention.
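A minimal sketch of how these two binary flags can be appended to the per-entity features is given below; the feature layout and helper name are illustrative, not the released implementation.

```python
import numpy as np

def tag_vertices(entity_feats: np.ndarray, n_agents: int, self_idx: int):
    """Append IS_AGENT / IS_SELF columns to an (entities x features) matrix,
    for the observation of agent `self_idx`. Assumes agents come first."""
    n_entities = entity_feats.shape[0]
    is_agent = (np.arange(n_entities) < n_agents).astype(entity_feats.dtype)
    is_self = np.zeros(n_entities, dtype=entity_feats.dtype)
    is_self[self_idx] = 1.0
    return np.concatenate(
        [entity_feats, is_agent[:, None], is_self[:, None]], axis=1
    )

feats = np.random.rand(5, 4)       # e.g. 3 agents + 2 landmarks, 4 raw features
tagged = tag_vertices(feats, n_agents=3, self_idx=1)
assert tagged.shape == (5, 6)      # two extra binary columns appended
```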
[Figure 4 plots: per-panel agent trajectories in the x–y plane (legend: Agent_i, Landmark_i_real) paired with the evolution of the episode reward over timesteps. (a) 3 Agents, 3 Landmarks. (b) 4 Agents, 4 Landmarks. (c) 5 Agents, 5 Landmarks. (d) 6 Agents, 6 Landmarks.]

Figure 4: Some examples of the learned policies in the Spread tasks, using TransfQMix trained in the 4v4 scenario. The smoothed circles represent the trajectories of the agents. The fully-filled circles represent their positions at the end of the episode. The green line in the right figures is the evolution of the global reward during the episode.
- EtE4T4oBgHgl3EQf6w5f/content/tmp_files/load_file.txt
ADDED
The diff for this file is too large to render. See raw diff.

- G9E1T4oBgHgl3EQfXQT3/vector_store/index.faiss
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ccfcc561abfd1618344e559fb46ec50ae677602bdff0e966ca6548df8a5245e
+size 11403309

- G9E1T4oBgHgl3EQfXQT3/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8684b606d780e49b3264e8a6f2f22da75c6d92eaffeb2f786297a5256a53f348
+size 377364

- H9FJT4oBgHgl3EQfFSyX/content/2301.11442v1.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d22f9391e5d080e36d1eb907b426913efcb4ee3a709ecbee5c069a3de24479dc
+size 250646

- H9FJT4oBgHgl3EQfFSyX/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98d416b793970f631f4ad26dbf88e977e6a2154494c965c82df74f74da742fdd
+size 126305

- HtA0T4oBgHgl3EQfB_9Y/vector_store/index.faiss
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43ba989b6fb01cf054166ce700251cd2145d4a2c7eaffe57e47477f6e082d614
+size 3866669

- K9E1T4oBgHgl3EQfYwRv/vector_store/index.faiss
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac0aa519ab2790f16dbd7a18983e43db3c08853494b2f7a62775623ff30bf9bc
+size 3670061

- LtE2T4oBgHgl3EQfBAZB/vector_store/index.faiss
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa67d9441905108534e55aff44dcd485e3ff1d9d76fa049866546abc31a0f76c
+size 262189

- LtE3T4oBgHgl3EQfAwm8/content/2301.04261v1.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1c4b7ddb22ae78ca045338c30c4be941707bd41670a0bdf10d092ff6f81133d
+size 3551218

- NNE2T4oBgHgl3EQfqwgI/content/2301.04041v1.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10bbfd3acdc5ddd543263bb0ea9048719756ed21eca3b0ddd978234f2453573e
+size 2593704

- OtAzT4oBgHgl3EQfzf59/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b29f32fa2075d9820cae588f124e0f6a2f2da05b62b33cf322d7fa663136a6f9
+size 85363

- PNFKT4oBgHgl3EQfgS6S/content/2301.11833v1.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21695979b7c1a5e69ef71e4e174634a623ca83fbb7e4914bf2bc8ccb6806e7cc
+size 3383083